From 6669aa3b14d98d90929ca860421e5308374b0f46 Mon Sep 17 00:00:00 2001
From: Roland Shoemaker
Date: Wed, 21 May 2025 02:03:44 +0000
Subject: runtime: randomize heap base address

During initialization, allow randomizing the heap base address by generating a random uint64 and using its bits to randomize various portions of the address. We use the following method to randomize the base address:

* We first generate a random heapArenaBytes-aligned address that we use for generating the hints.
* On the first call to mheap.grow, we then generate a random PallocChunkBytes-aligned offset into the mmap'd heap region, which we use as the base for the heap region.
* We then mark a random number of pages within the page allocator as allocated.

Our final randomized "heap base address" becomes the first byte of the first available page returned by the page allocator. This results in an address with at least heapAddrBits-gc.PageShift-1 bits of entropy.

Fixes #27583

Change-Id: Ideb4450a5ff747a132f702d563d2a516dec91a88 Reviewed-on: https://go-review.googlesource.com/c/go/+/674835 Reviewed-by: Michael Knyszek LUCI-TryBot-Result: Go LUCI --- src/runtime/export_test.go | 18 ++++++++++++++++-- 1 file changed, 16 insertions(+), 2 deletions(-) (limited to 'src/runtime/export_test.go')

diff --git a/src/runtime/export_test.go b/src/runtime/export_test.go index 9a4611e26e..2fc5b4a38a 100644 --- a/src/runtime/export_test.go +++ b/src/runtime/export_test.go @@ -9,6 +9,7 @@ package runtime import ( "internal/abi" "internal/goarch" + "internal/goexperiment" "internal/goos" "internal/runtime/atomic" "internal/runtime/gc" @@ -417,7 +418,8 @@ func ReadMemStatsSlow() (base, slow MemStats) { slow.HeapReleased += uint64(pg) * pageSize } for _, p := range allp { - pg := sys.OnesCount64(p.pcache.scav) + // Only count scav bits for pages in the cache. + pg := sys.OnesCount64(p.pcache.cache & p.pcache.scav) slow.HeapReleased += uint64(pg) * pageSize } @@ -1120,12 +1122,16 @@ func CheckScavengedBitsCleared(mismatches []BitsMismatch) (n int, ok bool) { // Lock so that we can safely access the bitmap. lock(&mheap_.lock) + + heapBase := mheap_.pages.inUse.ranges[0].base.addr() + secondArenaBase := arenaBase(arenaIndex(heapBase) + 1) chunkLoop: for i := mheap_.pages.start; i < mheap_.pages.end; i++ { chunk := mheap_.pages.tryChunkOf(i) if chunk == nil { continue } + cb := chunkBase(i) for j := 0; j < pallocChunkPages/64; j++ { // Run over each 64-bit bitmap section and ensure // scavenged is being cleared properly on allocation. want := chunk.scavenged[j] &^ chunk.pallocBits[j] got := chunk.scavenged[j] if want != got { + // When goexperiment.RandomizedHeapBase64 is set, we use a + // series of padding pages to generate a randomized heap base + // address; these padding pages have both the alloc and scav + // bits set. If we see this for a chunk between the heap base + // address and the start of the second arena, continue.
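+ // A minimal sketch of the randomization described in the CL message
+ // above (editor's illustration; regionSize and maxPadPages are
+ // hypothetical names, r is a random uint64):
+ //
+ //	off := uintptr(r) % regionSize
+ //	off &^= pallocChunkBytes - 1        // chunk-aligned region offset
+ //	pad := uintptr(r>>32) % maxPadPages // pages left marked allocated
+ //
+ // Those pad pages are exactly what shows up here with both bits set.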
+ if goexperiment.RandomizedHeapBase64 && (cb >= heapBase && cb < secondArenaBase) { + continue + } ok = false if n >= len(mismatches) { break chunkLoop } mismatches[n] = BitsMismatch{ - Base: chunkBase(i) + uintptr(j)*64*pageSize, + Base: cb + uintptr(j)*64*pageSize, Got: got, Want: want, } -- cgit v1.3-6-g1900 From f7d167fe710501f687fad006125f4ec60be42c35 Mon Sep 17 00:00:00 2001 From: Keith Randall Date: Sat, 14 Jun 2025 17:06:20 -0700 Subject: internal/abi: move direct/indirect flag from Kind to TFlag This info makes more sense in the flags instead of as a high bit of the kind. This makes kind access simpler because we now don't need to mask anything. Cleaned up most direct field accesses to use methods instead. (reflect making new types is the only remaining direct accessor.) IfaceIndir -> !IsDirectIface everywhere. gocore has been updated to handle the new location. So has delve. TODO: any other tools need updating? Change-Id: I123f97a4d4bdd0bff1641ee7e276d1cc0bd7e8eb Reviewed-on: https://go-review.googlesource.com/c/go/+/681936 Reviewed-by: Keith Randall Reviewed-by: David Chase LUCI-TryBot-Result: Go LUCI --- src/cmd/compile/internal/reflectdata/reflect.go | 6 +++--- src/cmd/compile/internal/test/inl_test.go | 2 +- src/cmd/link/internal/ld/decodesym.go | 2 +- src/internal/abi/type.go | 28 ++++++++++++------------- src/internal/reflectlite/export_test.go | 2 +- src/internal/reflectlite/value.go | 6 +++--- src/internal/runtime/maps/map.go | 2 +- src/reflect/abi.go | 2 +- src/reflect/badlinkname.go | 2 +- src/reflect/export_noswiss_test.go | 3 +-- src/reflect/map_swiss.go | 2 +- src/reflect/type.go | 20 +++++++++--------- src/reflect/value.go | 26 +++++++++++------------ src/runtime/alg.go | 10 ++++----- src/runtime/arena.go | 10 ++++----- src/runtime/cgocall.go | 22 +++++++++---------- src/runtime/debuglog.go | 2 +- src/runtime/error.go | 2 +- src/runtime/export_debug_test.go | 4 ++-- src/runtime/export_test.go | 2 +- src/runtime/heapdump.go | 2 +- src/runtime/mbitmap.go | 4 ++-- src/runtime/mfinal.go | 10 ++++----- src/runtime/pinner.go | 2 +- src/runtime/plugin.go | 2 +- src/runtime/race.go | 4 ++-- src/runtime/syscall_windows.go | 8 +++---- src/runtime/type.go | 6 +++--- src/runtime/typekind.go | 12 ----------- 29 files changed, 95 insertions(+), 110 deletions(-) delete mode 100644 src/runtime/typekind.go (limited to 'src/runtime/export_test.go') diff --git a/src/cmd/compile/internal/reflectdata/reflect.go b/src/cmd/compile/internal/reflectdata/reflect.go index fb60569670..62cef16816 100644 --- a/src/cmd/compile/internal/reflectdata/reflect.go +++ b/src/cmd/compile/internal/reflectdata/reflect.go @@ -491,6 +491,9 @@ func dcommontype(c rttype.Cursor, t *types.Type) { exported = types.IsExported(t.Elem().Sym().Name) } } + if types.IsDirectIface(t) { + tflag |= abi.TFlagDirectIface + } if tflag != abi.TFlag(uint8(tflag)) { // this should optimize away completely @@ -511,9 +514,6 @@ func dcommontype(c rttype.Cursor, t *types.Type) { c.Field("FieldAlign_").WriteUint8(uint8(t.Alignment())) kind := kinds[t.Kind()] - if types.IsDirectIface(t) { - kind |= abi.KindDirectIface - } c.Field("Kind_").WriteUint8(uint8(kind)) c.Field("Equal").WritePtr(eqfunc) diff --git a/src/cmd/compile/internal/test/inl_test.go b/src/cmd/compile/internal/test/inl_test.go index 091933e1e7..ace95f78a6 100644 --- a/src/cmd/compile/internal/test/inl_test.go +++ b/src/cmd/compile/internal/test/inl_test.go @@ -47,7 +47,6 @@ func TestIntendedInlining(t *testing.T) { "getMCache", "heapSetTypeNoHeader", 
"heapSetTypeSmallHeader", - "isDirectIface", "itabHashFunc", "nextslicecap", "noescape", @@ -109,6 +108,7 @@ func TestIntendedInlining(t *testing.T) { "(*Buffer).tryGrowByReslice", }, "internal/abi": { + "(*Type).IsDirectIface", "UseInterfaceSwitchCache", }, "internal/runtime/math": { diff --git a/src/cmd/link/internal/ld/decodesym.go b/src/cmd/link/internal/ld/decodesym.go index 4d38ec1a77..248205539d 100644 --- a/src/cmd/link/internal/ld/decodesym.go +++ b/src/cmd/link/internal/ld/decodesym.go @@ -38,7 +38,7 @@ func uncommonSize(arch *sys.Arch) int { return int(abi.UncommonSize()) } // Type.commonType.kind func decodetypeKind(arch *sys.Arch, p []byte) abi.Kind { - return abi.Kind(p[2*arch.PtrSize+7]) & abi.KindMask // 0x13 / 0x1f + return abi.Kind(p[2*arch.PtrSize+7]) // 0x13 } // Type.commonType.size diff --git a/src/internal/abi/type.go b/src/internal/abi/type.go index 786fb9cd66..29e17cf6a2 100644 --- a/src/internal/abi/type.go +++ b/src/internal/abi/type.go @@ -24,7 +24,7 @@ type Type struct { TFlag TFlag // extra type information flags Align_ uint8 // alignment of variable with this type FieldAlign_ uint8 // alignment of struct field with this type - Kind_ Kind // enumeration for C + Kind_ Kind // what kind of type this is (string, int, ...) // function for comparing objects of this type // (ptr to object A, ptr to object B) -> ==? Equal func(unsafe.Pointer, unsafe.Pointer) bool @@ -78,12 +78,6 @@ const ( UnsafePointer ) -const ( - // TODO (khr, drchase) why aren't these in TFlag? Investigate, fix if possible. - KindDirectIface Kind = 1 << 5 - KindMask Kind = (1 << 5) - 1 -) - // TFlag is used by a Type to signal what extra type information is // available in the memory directly following the Type value. type TFlag uint8 @@ -125,6 +119,15 @@ const ( // has type **byte instead of *byte. The runtime will store a // pointer to the GC pointer bitmask in *GCData. TFlagGCMaskOnDemand TFlag = 1 << 4 + + // TFlagDirectIface means that a value of this type is stored directly + // in the data field of an interface, instead of indirectly. Normally + // this means the type is pointer-ish. + TFlagDirectIface TFlag = 1 << 5 + + // Leaving this breadcrumb behind for dlv. It should not be used, and no + // Kind should be big enough to set this bit. + KindDirectIface Kind = 1 << 5 ) // NameOff is the offset to a name from moduledata.types. See resolveNameOff in runtime. @@ -190,7 +193,7 @@ func TypeFor[T any]() *Type { return (*PtrType)(unsafe.Pointer(TypeOf((*T)(nil)))).Elem } -func (t *Type) Kind() Kind { return t.Kind_ & KindMask } +func (t *Type) Kind() Kind { return t.Kind_ } func (t *Type) HasName() bool { return t.TFlag&TFlagNamed != 0 @@ -199,14 +202,9 @@ func (t *Type) HasName() bool { // Pointers reports whether t contains pointers. func (t *Type) Pointers() bool { return t.PtrBytes != 0 } -// IfaceIndir reports whether t is stored indirectly in an interface value. -func (t *Type) IfaceIndir() bool { - return t.Kind_&KindDirectIface == 0 -} - -// isDirectIface reports whether t is stored directly in an interface value. +// IsDirectIface reports whether t is stored directly in an interface value. 
func (t *Type) IsDirectIface() bool { - return t.Kind_&KindDirectIface != 0 + return t.TFlag&TFlagDirectIface != 0 } func (t *Type) GcSlice(begin, end uintptr) []byte { diff --git a/src/internal/reflectlite/export_test.go b/src/internal/reflectlite/export_test.go index 0ad3d97c15..93762a8387 100644 --- a/src/internal/reflectlite/export_test.go +++ b/src/internal/reflectlite/export_test.go @@ -70,7 +70,7 @@ func Zero(typ Type) Value { } t := typ.common() fl := flag(t.Kind()) - if t.IfaceIndir() { + if !t.IsDirectIface() { return Value{t, unsafe_New(t), fl | flagIndir} } return Value{t, nil, fl} diff --git a/src/internal/reflectlite/value.go b/src/internal/reflectlite/value.go index 7b231d554f..a92df613f5 100644 --- a/src/internal/reflectlite/value.go +++ b/src/internal/reflectlite/value.go @@ -52,7 +52,7 @@ type Value struct { // - flagIndir: val holds a pointer to the data // - flagAddr: v.CanAddr is true (implies flagIndir and ptr is non-nil) // - flagMethod: v is a method value. - // If ifaceIndir(typ), code can assume that flagIndir is set. + // If !typ.IsDirectIface(), code can assume that flagIndir is set. // // The remaining 22+ bits give a method number for method values. // If flag.kind() != Func, code can assume that flagMethod is unset. @@ -118,7 +118,7 @@ func packEface(v Value) any { e := (*abi.EmptyInterface)(unsafe.Pointer(&i)) // First, fill in the data portion of the interface. switch { - case t.IfaceIndir(): + case !t.IsDirectIface(): if v.flag&flagIndir == 0 { panic("bad indir") } @@ -155,7 +155,7 @@ func unpackEface(i any) Value { return Value{} } f := flag(t.Kind()) - if t.IfaceIndir() { + if !t.IsDirectIface() { f |= flagIndir } return Value{t, e.Data, f} diff --git a/src/internal/runtime/maps/map.go b/src/internal/runtime/maps/map.go index 78fed6a5b7..017942cfad 100644 --- a/src/internal/runtime/maps/map.go +++ b/src/internal/runtime/maps/map.go @@ -859,7 +859,7 @@ func mapKeyError2(t *abi.Type, p unsafe.Pointer) error { return unhashableTypeError{t} } - if t.Kind_&abi.KindDirectIface != 0 { + if t.IsDirectIface() { return mapKeyError2(t, unsafe.Pointer(pdata)) } else { return mapKeyError2(t, *pdata) diff --git a/src/reflect/abi.go b/src/reflect/abi.go index b67d821743..bcd0a95f1b 100644 --- a/src/reflect/abi.go +++ b/src/reflect/abi.go @@ -166,7 +166,7 @@ func (a *abiSeq) addRcvr(rcvr *abi.Type) (*abiStep, bool) { // The receiver is always one word. 
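	// Indirect receivers are passed as a pointer to their storage, so
	// either branch assigns exactly one pointer-sized slot; a direct
	// non-pointer receiver takes the integer path without the pointer
	// bit (editor's sketch of what the else arm does):
	//
	//	ok = a.assignIntN(0, goarch.PtrSize, 1, 0b0)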
a.valueStart = append(a.valueStart, len(a.steps)) var ok, ptr bool - if rcvr.IfaceIndir() || rcvr.Pointers() { + if !rcvr.IsDirectIface() || rcvr.Pointers() { ok = a.assignIntN(0, goarch.PtrSize, 1, 0b1) ptr = true } else { diff --git a/src/reflect/badlinkname.go b/src/reflect/badlinkname.go index eb701bff03..8a9bea6721 100644 --- a/src/reflect/badlinkname.go +++ b/src/reflect/badlinkname.go @@ -27,7 +27,7 @@ import ( // //go:linkname unusedIfaceIndir reflect.ifaceIndir func unusedIfaceIndir(t *abi.Type) bool { - return t.Kind_&abi.KindDirectIface == 0 + return !t.IsDirectIface() } //go:linkname valueInterface diff --git a/src/reflect/export_noswiss_test.go b/src/reflect/export_noswiss_test.go index 34e5e92055..286ba6300d 100644 --- a/src/reflect/export_noswiss_test.go +++ b/src/reflect/export_noswiss_test.go @@ -7,7 +7,6 @@ package reflect import ( - "internal/abi" "unsafe" ) @@ -17,7 +16,7 @@ func MapBucketOf(x, y Type) Type { func CachedBucketOf(m Type) Type { t := m.(*rtype) - if Kind(t.t.Kind_&abi.KindMask) != Map { + if Kind(t.t.Kind()) != Map { panic("not map") } tt := (*mapType)(unsafe.Pointer(t)) diff --git a/src/reflect/map_swiss.go b/src/reflect/map_swiss.go index 2eac51e57d..2f31d6689f 100644 --- a/src/reflect/map_swiss.go +++ b/src/reflect/map_swiss.go @@ -65,7 +65,7 @@ func MapOf(key, elem Type) Type { var imap any = (map[unsafe.Pointer]unsafe.Pointer)(nil) mt := **(**mapType)(unsafe.Pointer(&imap)) mt.Str = resolveReflectName(newName(s, "", false, false)) - mt.TFlag = 0 + mt.TFlag = abi.TFlagDirectIface mt.Hash = fnv1(etyp.Hash, 'm', byte(ktyp.Hash>>24), byte(ktyp.Hash>>16), byte(ktyp.Hash>>8), byte(ktyp.Hash)) mt.Key = ktyp mt.Elem = etyp diff --git a/src/reflect/type.go b/src/reflect/type.go index 0004cab985..cec8662c01 100644 --- a/src/reflect/type.go +++ b/src/reflect/type.go @@ -1813,7 +1813,7 @@ func ChanOf(dir ChanDir, t Type) Type { var ichan any = (chan unsafe.Pointer)(nil) prototype := *(**chanType)(unsafe.Pointer(&ichan)) ch := *prototype - ch.TFlag = abi.TFlagRegularMemory + ch.TFlag = abi.TFlagRegularMemory | abi.TFlagDirectIface ch.Dir = abi.ChanDir(dir) ch.Str = resolveReflectName(newName(s, "", false, false)) ch.Hash = fnv1(typ.Hash, 'c', byte(dir)) @@ -1894,7 +1894,7 @@ func FuncOf(in, out []Type, variadic bool) Type { hash = fnv1(hash, byte(t.t.Hash>>24), byte(t.t.Hash>>16), byte(t.t.Hash>>8), byte(t.t.Hash)) } - ft.TFlag = 0 + ft.TFlag = abi.TFlagDirectIface ft.Hash = hash ft.InCount = uint16(len(in)) ft.OutCount = uint16(len(out)) @@ -2313,7 +2313,7 @@ func StructOf(fields []StructField) Type { // Issue 15924. 
panic("reflect: embedded type with methods not implemented if type is not first field") } - if len(fields) > 1 && ft.Kind_&abi.KindDirectIface != 0 { + if len(fields) > 1 && ft.IsDirectIface() { panic("reflect: embedded type with methods not implemented for non-pointer type") } for _, m := range unt.Methods() { @@ -2524,11 +2524,11 @@ func StructOf(fields []StructField) Type { } switch { - case len(fs) == 1 && !fs[0].Typ.IfaceIndir(): + case len(fs) == 1 && fs[0].Typ.IsDirectIface(): // structs of 1 direct iface type can be direct - typ.Kind_ |= abi.KindDirectIface + typ.TFlag |= abi.TFlagDirectIface default: - typ.Kind_ &^= abi.KindDirectIface + typ.TFlag &^= abi.TFlagDirectIface } return addToCache(toType(&typ.Type)) @@ -2694,11 +2694,11 @@ func ArrayOf(length int, elem Type) Type { } switch { - case length == 1 && !typ.IfaceIndir(): + case length == 1 && typ.IsDirectIface(): // array of 1 direct iface type can be direct - array.Kind_ |= abi.KindDirectIface + array.TFlag |= abi.TFlagDirectIface default: - array.Kind_ &^= abi.KindDirectIface + array.TFlag &^= abi.TFlagDirectIface } ti, _ := lookupCache.LoadOrStore(ckey, toRType(&array.Type)) @@ -2834,7 +2834,7 @@ func addTypeBits(bv *bitVector, offset uintptr, t *abi.Type) { return } - switch Kind(t.Kind_ & abi.KindMask) { + switch Kind(t.Kind()) { case Chan, Func, Map, Pointer, Slice, String, UnsafePointer: // 1 pointer at start of representation for bv.n < uint32(offset/goarch.PtrSize) { diff --git a/src/reflect/value.go b/src/reflect/value.go index ffdf789648..6f65ef81dc 100644 --- a/src/reflect/value.go +++ b/src/reflect/value.go @@ -55,7 +55,7 @@ type Value struct { // - flagIndir: val holds a pointer to the data // - flagAddr: v.CanAddr is true (implies flagIndir and ptr is non-nil) // - flagMethod: v is a method value. - // If ifaceIndir(typ), code can assume that flagIndir is set. + // If !typ.IsDirectIface(), code can assume that flagIndir is set. // // The remaining 22+ bits give a method number for method values. // If flag.kind() != Func, code can assume that flagMethod is unset. @@ -125,7 +125,7 @@ func packEface(v Value) any { e := abi.EmptyInterface{} // First, fill in the data portion of the interface. switch { - case t.IfaceIndir(): + case !t.IsDirectIface(): if v.flag&flagIndir == 0 { panic("bad indir") } @@ -159,7 +159,7 @@ func unpackEface(i any) Value { return Value{} } f := flag(t.Kind()) - if t.IfaceIndir() { + if !t.IsDirectIface() { f |= flagIndir } return Value{t, e.Data, f} @@ -624,7 +624,7 @@ func (v Value) call(op string, in []Value) []Value { } // Handle pointers passed in registers. - if !tv.IfaceIndir() { + if tv.IsDirectIface() { // Pointer-valued data gets put directly // into v.ptr. if steps[0].kind != abiStepPointer { @@ -714,7 +714,7 @@ func callReflect(ctxt *makeFuncImpl, frame unsafe.Pointer, retValid *bool, regs v := Value{typ, nil, flag(typ.Kind())} steps := abid.call.stepsForValue(i) if st := steps[0]; st.kind == abiStepStack { - if typ.IfaceIndir() { + if !typ.IsDirectIface() { // value cannot be inlined in interface data. // Must make a copy, because f might keep a reference to it, // and we cannot let f keep a reference to the stack frame @@ -728,7 +728,7 @@ func callReflect(ctxt *makeFuncImpl, frame unsafe.Pointer, retValid *bool, regs v.ptr = *(*unsafe.Pointer)(add(ptr, st.stkOff, "1-ptr")) } } else { - if typ.IfaceIndir() { + if !typ.IsDirectIface() { // All that's left is values passed in registers that we need to // create space for the values. 
v.flag |= flagIndir @@ -914,7 +914,7 @@ func storeRcvr(v Value, p unsafe.Pointer) { // the interface data word becomes the receiver word iface := (*nonEmptyInterface)(v.ptr) *(*unsafe.Pointer)(p) = iface.word - } else if v.flag&flagIndir != 0 && !t.IfaceIndir() { + } else if v.flag&flagIndir != 0 && t.IsDirectIface() { *(*unsafe.Pointer)(p) = *(*unsafe.Pointer)(v.ptr) } else { *(*unsafe.Pointer)(p) = v.ptr @@ -1224,7 +1224,7 @@ func (v Value) Elem() Value { case Pointer: ptr := v.ptr if v.flag&flagIndir != 0 { - if v.typ().IfaceIndir() { + if !v.typ().IsDirectIface() { // This is a pointer to a not-in-heap object. ptr points to a uintptr // in the heap. That uintptr is the address of a not-in-heap object. // In general, pointers to not-in-heap objects can be total junk. @@ -1852,7 +1852,7 @@ func (v Value) lenNonSlice() int { // copyVal returns a Value containing the map key or value at ptr, // allocating a new variable as needed. func copyVal(typ *abi.Type, fl flag, ptr unsafe.Pointer) Value { - if typ.IfaceIndir() { + if !typ.IsDirectIface() { // Copy result so future changes to the map // won't change the underlying value. c := unsafe_New(typ) @@ -2076,7 +2076,7 @@ func (v Value) recv(nb bool) (val Value, ok bool) { t := tt.Elem val = Value{t, nil, flag(t.Kind())} var p unsafe.Pointer - if t.IfaceIndir() { + if !t.IsDirectIface() { p = unsafe_New(t) val.ptr = p val.flag |= flagIndir @@ -2952,7 +2952,7 @@ func Select(cases []SelectCase) (chosen int, recv Value, recvOK bool) { t := tt.Elem p := runcases[chosen].val fl := flag(t.Kind()) - if t.IfaceIndir() { + if !t.IsDirectIface() { recv = Value{t, p, fl | flagIndir} } else { recv = Value{t, *(*unsafe.Pointer)(p), fl} @@ -3065,7 +3065,7 @@ func Zero(typ Type) Value { } t := &typ.(*rtype).t fl := flag(t.Kind()) - if t.IfaceIndir() { + if !t.IsDirectIface() { var p unsafe.Pointer if t.Size() <= abi.ZeroValSize { p = unsafe.Pointer(&zeroVal[0]) @@ -3088,7 +3088,7 @@ func New(typ Type) Value { } t := &typ.(*rtype).t pt := ptrTo(t) - if pt.IfaceIndir() { + if !pt.IsDirectIface() { // This is a pointer to a not-in-heap type. panic("reflect: New of type that may not be allocated in heap (possibly undefined cgo C type)") } diff --git a/src/runtime/alg.go b/src/runtime/alg.go index df32bc7941..b956f9d05a 100644 --- a/src/runtime/alg.go +++ b/src/runtime/alg.go @@ -144,7 +144,7 @@ func interhash(p unsafe.Pointer, h uintptr) uintptr { // we want to report the struct, not the slice). panic(errorString("hash of unhashable type " + toRType(t).string())) } - if isDirectIface(t) { + if t.IsDirectIface() { return c1 * typehash(t, unsafe.Pointer(&a.data), h^c0) } else { return c1 * typehash(t, a.data, h^c0) @@ -171,7 +171,7 @@ func nilinterhash(p unsafe.Pointer, h uintptr) uintptr { // See comment in interhash above. 
panic(errorString("hash of unhashable type " + toRType(t).string())) } - if isDirectIface(t) { + if t.IsDirectIface() { return c1 * typehash(t, unsafe.Pointer(&a.data), h^c0) } else { return c1 * typehash(t, a.data, h^c0) @@ -211,7 +211,7 @@ func typehash(t *_type, p unsafe.Pointer, h uintptr) uintptr { return memhash(p, h, t.Size_) } } - switch t.Kind_ & abi.KindMask { + switch t.Kind() { case abi.Float32: return f32hash(p, h) case abi.Float64: @@ -306,7 +306,7 @@ func efaceeq(t *_type, x, y unsafe.Pointer) bool { if eq == nil { panic(errorString("comparing uncomparable type " + toRType(t).string())) } - if isDirectIface(t) { + if t.IsDirectIface() { // Direct interface types are ptr, chan, map, func, and single-element structs/arrays thereof. // Maps and funcs are not comparable, so they can't reach here. // Ptrs, chans, and single-element items can be compared directly using ==. @@ -323,7 +323,7 @@ func ifaceeq(tab *itab, x, y unsafe.Pointer) bool { if eq == nil { panic(errorString("comparing uncomparable type " + toRType(t).string())) } - if isDirectIface(t) { + if t.IsDirectIface() { // See comment in efaceeq. return x == y } diff --git a/src/runtime/arena.go b/src/runtime/arena.go index 0aa3876aaa..52a2a99d6c 100644 --- a/src/runtime/arena.go +++ b/src/runtime/arena.go @@ -111,7 +111,7 @@ func arena_newArena() unsafe.Pointer { //go:linkname arena_arena_New arena.runtime_arena_arena_New func arena_arena_New(arena unsafe.Pointer, typ any) any { t := (*_type)(efaceOf(&typ).data) - if t.Kind_&abi.KindMask != abi.Pointer { + if t.Kind() != abi.Pointer { throw("arena_New: non-pointer type") } te := (*ptrtype)(unsafe.Pointer(t)).Elem @@ -145,7 +145,7 @@ func arena_heapify(s any) any { var v unsafe.Pointer e := efaceOf(&s) t := e._type - switch t.Kind_ & abi.KindMask { + switch t.Kind() { case abi.String: v = stringStructOf((*string)(e.data)).str case abi.Slice: @@ -162,7 +162,7 @@ func arena_heapify(s any) any { } // Heap-allocate storage for a copy. var x any - switch t.Kind_ & abi.KindMask { + switch t.Kind() { case abi.String: s1 := s.(string) s2, b := rawstring(len(s1)) @@ -293,11 +293,11 @@ func (a *userArena) slice(sl any, cap int) { } i := efaceOf(&sl) typ := i._type - if typ.Kind_&abi.KindMask != abi.Pointer { + if typ.Kind() != abi.Pointer { panic("slice result of non-ptr type") } typ = (*ptrtype)(unsafe.Pointer(typ)).Elem - if typ.Kind_&abi.KindMask != abi.Slice { + if typ.Kind() != abi.Slice { panic("slice of non-ptr-to-slice type") } typ = (*slicetype)(unsafe.Pointer(typ)).Elem diff --git a/src/runtime/cgocall.go b/src/runtime/cgocall.go index 5bb6f58bc2..18e1dc8baf 100644 --- a/src/runtime/cgocall.go +++ b/src/runtime/cgocall.go @@ -543,18 +543,18 @@ func cgoCheckPointer(ptr any, arg any) { t := ep._type top := true - if arg != nil && (t.Kind_&abi.KindMask == abi.Pointer || t.Kind_&abi.KindMask == abi.UnsafePointer) { + if arg != nil && (t.Kind() == abi.Pointer || t.Kind() == abi.UnsafePointer) { p := ep.data - if t.Kind_&abi.KindDirectIface == 0 { + if !t.IsDirectIface() { p = *(*unsafe.Pointer)(p) } if p == nil || !cgoIsGoPointer(p) { return } aep := efaceOf(&arg) - switch aep._type.Kind_ & abi.KindMask { + switch aep._type.Kind() { case abi.Bool: - if t.Kind_&abi.KindMask == abi.UnsafePointer { + if t.Kind() == abi.UnsafePointer { // We don't know the type of the element. break } @@ -578,7 +578,7 @@ func cgoCheckPointer(ptr any, arg any) { // Check the array rather than the pointer. 
pt := (*abi.PtrType)(unsafe.Pointer(aep._type)) t = pt.Elem - if t.Kind_&abi.KindMask != abi.Array { + if t.Kind() != abi.Array { throw("can't happen") } ep = aep @@ -588,7 +588,7 @@ func cgoCheckPointer(ptr any, arg any) { } } - cgoCheckArg(t, ep.data, t.Kind_&abi.KindDirectIface == 0, top, cgoCheckPointerFail) + cgoCheckArg(t, ep.data, !t.IsDirectIface(), top, cgoCheckPointerFail) } const cgoCheckPointerFail = "cgo argument has Go pointer to unpinned Go pointer" @@ -605,7 +605,7 @@ func cgoCheckArg(t *_type, p unsafe.Pointer, indir, top bool, msg string) { return } - switch t.Kind_ & abi.KindMask { + switch t.Kind() { default: throw("can't happen") case abi.Array: @@ -614,7 +614,7 @@ func cgoCheckArg(t *_type, p unsafe.Pointer, indir, top bool, msg string) { if at.Len != 1 { throw("can't happen") } - cgoCheckArg(at.Elem, p, at.Elem.Kind_&abi.KindDirectIface == 0, top, msg) + cgoCheckArg(at.Elem, p, !at.Elem.IsDirectIface(), top, msg) return } for i := uintptr(0); i < at.Len; i++ { @@ -652,7 +652,7 @@ func cgoCheckArg(t *_type, p unsafe.Pointer, indir, top bool, msg string) { if !top && !isPinned(p) { panic(errorString(msg)) } - cgoCheckArg(it, p, it.Kind_&abi.KindDirectIface == 0, false, msg) + cgoCheckArg(it, p, !it.IsDirectIface(), false, msg) case abi.Slice: st := (*slicetype)(unsafe.Pointer(t)) s := (*slice)(p) @@ -684,7 +684,7 @@ func cgoCheckArg(t *_type, p unsafe.Pointer, indir, top bool, msg string) { if len(st.Fields) != 1 { throw("can't happen") } - cgoCheckArg(st.Fields[0].Typ, p, st.Fields[0].Typ.Kind_&abi.KindDirectIface == 0, top, msg) + cgoCheckArg(st.Fields[0].Typ, p, !st.Fields[0].Typ.IsDirectIface(), top, msg) return } for _, f := range st.Fields { @@ -792,5 +792,5 @@ func cgoCheckResult(val any) { ep := efaceOf(&val) t := ep._type - cgoCheckArg(t, ep.data, t.Kind_&abi.KindDirectIface == 0, false, cgoResultFail) + cgoCheckArg(t, ep.data, !t.IsDirectIface(), false, cgoResultFail) } diff --git a/src/runtime/debuglog.go b/src/runtime/debuglog.go index 50fba3568d..e993e396c1 100644 --- a/src/runtime/debuglog.go +++ b/src/runtime/debuglog.go @@ -327,7 +327,7 @@ func (l *dloggerImpl) p(x any) *dloggerImpl { l.w.uvarint(0) } else { v := efaceOf(&x) - switch v._type.Kind_ & abi.KindMask { + switch v._type.Kind() { case abi.Chan, abi.Func, abi.Map, abi.Pointer, abi.UnsafePointer: l.w.uvarint(uint64(uintptr(v.data))) default: diff --git a/src/runtime/error.go b/src/runtime/error.go index a2611bdfd9..f95b14d780 100644 --- a/src/runtime/error.go +++ b/src/runtime/error.go @@ -260,7 +260,7 @@ func printanycustomtype(i any) { eface := efaceOf(&i) typestring := toRType(eface._type).string() - switch eface._type.Kind_ { + switch eface._type.Kind() { case abi.String: print(typestring, `("`) printindented(*(*string)(eface.data)) diff --git a/src/runtime/export_debug_test.go b/src/runtime/export_debug_test.go index 96f6fd9eea..94dc974804 100644 --- a/src/runtime/export_debug_test.go +++ b/src/runtime/export_debug_test.go @@ -33,13 +33,13 @@ func InjectDebugCall(gp *g, fn any, regArgs *abi.RegArgs, stackArgs any, tkill f } f := efaceOf(&fn) - if f._type == nil || f._type.Kind_&abi.KindMask != abi.Func { + if f._type == nil || f._type.Kind() != abi.Func { return nil, plainError("fn must be a function") } fv := (*funcval)(f.data) a := efaceOf(&stackArgs) - if a._type != nil && a._type.Kind_&abi.KindMask != abi.Pointer { + if a._type != nil && a._type.Kind() != abi.Pointer { return nil, plainError("args must be a pointer or nil") } argp := a.data diff --git a/src/runtime/export_test.go 
b/src/runtime/export_test.go index 2fc5b4a38a..3af1362ee2 100644 --- a/src/runtime/export_test.go +++ b/src/runtime/export_test.go @@ -1773,7 +1773,7 @@ func NewUserArena() *UserArena { func (a *UserArena) New(out *any) { i := efaceOf(out) typ := i._type - if typ.Kind_&abi.KindMask != abi.Pointer { + if typ.Kind() != abi.Pointer { panic("new result of non-ptr type") } typ = (*ptrtype)(unsafe.Pointer(typ)).Elem diff --git a/src/runtime/heapdump.go b/src/runtime/heapdump.go index 72878d0728..d9474034c2 100644 --- a/src/runtime/heapdump.go +++ b/src/runtime/heapdump.go @@ -206,7 +206,7 @@ func dumptype(t *_type) { dwritebyte('.') dwrite(unsafe.Pointer(unsafe.StringData(name)), uintptr(len(name))) } - dumpbool(t.Kind_&abi.KindDirectIface == 0 || t.Pointers()) + dumpbool(!t.IsDirectIface() || t.Pointers()) } // dump an object. diff --git a/src/runtime/mbitmap.go b/src/runtime/mbitmap.go index b2557275ec..9872e5297f 100644 --- a/src/runtime/mbitmap.go +++ b/src/runtime/mbitmap.go @@ -947,7 +947,7 @@ func doubleCheckTypePointersOfType(s *mspan, typ *_type, addr, size uintptr) { if typ == nil { return } - if typ.Kind_&abi.KindMask == abi.Interface { + if typ.Kind() == abi.Interface { // Interfaces are unfortunately inconsistently handled // when it comes to the type pointer, so it's easy to // produce a lot of false positives here. @@ -1776,7 +1776,7 @@ func pointerMask(ep any) (mask []byte) { t := e._type var et *_type - if t.Kind_&abi.KindMask != abi.Pointer { + if t.Kind() != abi.Pointer { throw("bad argument to getgcmask: expected type to be a pointer to the value type whose mask is being queried") } et = (*ptrtype)(unsafe.Pointer(t)).Elem diff --git a/src/runtime/mfinal.go b/src/runtime/mfinal.go index 2d4a54c933..bafdb01603 100644 --- a/src/runtime/mfinal.go +++ b/src/runtime/mfinal.go @@ -251,7 +251,7 @@ func runFinalizers() { // confusing the write barrier. *(*[2]uintptr)(frame) = [2]uintptr{} } - switch f.fint.Kind_ & abi.KindMask { + switch f.fint.Kind() { case abi.Pointer: // direct use of pointer *(*unsafe.Pointer)(r) = f.arg @@ -435,7 +435,7 @@ func SetFinalizer(obj any, finalizer any) { if etyp == nil { throw("runtime.SetFinalizer: first argument is nil") } - if etyp.Kind_&abi.KindMask != abi.Pointer { + if etyp.Kind() != abi.Pointer { throw("runtime.SetFinalizer: first argument is " + toRType(etyp).string() + ", not pointer") } ot := (*ptrtype)(unsafe.Pointer(etyp)) @@ -490,7 +490,7 @@ func SetFinalizer(obj any, finalizer any) { return } - if ftyp.Kind_&abi.KindMask != abi.Func { + if ftyp.Kind() != abi.Func { throw("runtime.SetFinalizer: second argument is " + toRType(ftyp).string() + ", not a function") } ft := (*functype)(unsafe.Pointer(ftyp)) @@ -505,13 +505,13 @@ func SetFinalizer(obj any, finalizer any) { case fint == etyp: // ok - same type goto okarg - case fint.Kind_&abi.KindMask == abi.Pointer: + case fint.Kind() == abi.Pointer: if (fint.Uncommon() == nil || etyp.Uncommon() == nil) && (*ptrtype)(unsafe.Pointer(fint)).Elem == ot.Elem { // ok - not same type, but both pointers, // one or the other is unnamed, and same element type, so assignable. 
goto okarg } - case fint.Kind_&abi.KindMask == abi.Interface: + case fint.Kind() == abi.Interface: ityp := (*interfacetype)(unsafe.Pointer(fint)) if len(ityp.Methods) == 0 { // ok - satisfies empty interface diff --git a/src/runtime/pinner.go b/src/runtime/pinner.go index 543bfdb7a4..424dd065ef 100644 --- a/src/runtime/pinner.go +++ b/src/runtime/pinner.go @@ -108,7 +108,7 @@ func pinnerGetPtr(i *any) unsafe.Pointer { if etyp == nil { panic(errorString("runtime.Pinner: argument is nil")) } - if kind := etyp.Kind_ & abi.KindMask; kind != abi.Pointer && kind != abi.UnsafePointer { + if kind := etyp.Kind(); kind != abi.Pointer && kind != abi.UnsafePointer { panic(errorString("runtime.Pinner: argument is not a pointer: " + toRType(etyp).string())) } if inUserArenaChunk(uintptr(e.data)) { diff --git a/src/runtime/plugin.go b/src/runtime/plugin.go index 4b6821b1fb..49cf13cb64 100644 --- a/src/runtime/plugin.go +++ b/src/runtime/plugin.go @@ -88,7 +88,7 @@ func plugin_lastmoduleinit() (path string, syms map[string]any, initTasks []*ini (*valp)[0] = unsafe.Pointer(t) name := symName.Name() - if t.Kind_&abi.KindMask == abi.Func { + if t.Kind() == abi.Func { name = "." + name } syms[name] = val diff --git a/src/runtime/race.go b/src/runtime/race.go index 7e7bca76ac..2cd4e3a9a2 100644 --- a/src/runtime/race.go +++ b/src/runtime/race.go @@ -156,7 +156,7 @@ const raceenabled = true // callerpc is a return PC of the function that calls this function, // pc is start PC of the function that calls this function. func raceReadObjectPC(t *_type, addr unsafe.Pointer, callerpc, pc uintptr) { - kind := t.Kind_ & abi.KindMask + kind := t.Kind() if kind == abi.Array || kind == abi.Struct { // for composite objects we have to read every address // because a write might happen to any subobject. @@ -174,7 +174,7 @@ func race_ReadObjectPC(t *abi.Type, addr unsafe.Pointer, callerpc, pc uintptr) { } func raceWriteObjectPC(t *_type, addr unsafe.Pointer, callerpc, pc uintptr) { - kind := t.Kind_ & abi.KindMask + kind := t.Kind() if kind == abi.Array || kind == abi.Struct { // for composite objects we have to write every address // because a write might happen to any subobject. diff --git a/src/runtime/syscall_windows.go b/src/runtime/syscall_windows.go index 93392896cb..0f74ebbe24 100644 --- a/src/runtime/syscall_windows.go +++ b/src/runtime/syscall_windows.go @@ -104,7 +104,7 @@ func (p *abiDesc) assignArg(t *_type) { // registers and the stack. panic("compileCallback: argument size is larger than uintptr") } - if k := t.Kind_ & abi.KindMask; GOARCH != "386" && (k == abi.Float32 || k == abi.Float64) { + if k := t.Kind(); GOARCH != "386" && (k == abi.Float32 || k == abi.Float64) { // In fastcall, floating-point arguments in // the first four positions are passed in // floating-point registers, which we don't @@ -175,7 +175,7 @@ func (p *abiDesc) assignArg(t *_type) { // // Returns whether the assignment succeeded. func (p *abiDesc) tryRegAssignArg(t *_type, offset uintptr) bool { - switch k := t.Kind_ & abi.KindMask; k { + switch k := t.Kind(); k { case abi.Bool, abi.Int, abi.Int8, abi.Int16, abi.Int32, abi.Uint, abi.Uint8, abi.Uint16, abi.Uint32, abi.Uintptr, abi.Pointer, abi.UnsafePointer: // Assign a register for all these types. 
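		// (Editor's sketch of the rule: every kind listed here fits in
		// one pointer-sized word, so it consumes a single integer
		// register slot; single-element arrays and struct fields are
		// handled recursively further down in this switch, and anything
		// that does not fit falls back to the stack path.)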
return p.assignReg(t.Size_, offset) @@ -270,7 +270,7 @@ func compileCallback(fn eface, cdecl bool) (code uintptr) { cdecl = false } - if fn._type == nil || (fn._type.Kind_&abi.KindMask) != abi.Func { + if fn._type == nil || fn._type.Kind() != abi.Func { panic("compileCallback: expected function with one uintptr-sized result") } ft := (*functype)(unsafe.Pointer(fn._type)) @@ -291,7 +291,7 @@ func compileCallback(fn eface, cdecl bool) (code uintptr) { if ft.OutSlice()[0].Size_ != goarch.PtrSize { panic("compileCallback: expected function with one uintptr-sized result") } - if k := ft.OutSlice()[0].Kind_ & abi.KindMask; k == abi.Float32 || k == abi.Float64 { + if k := ft.OutSlice()[0].Kind(); k == abi.Float32 || k == abi.Float64 { // In cdecl and stdcall, float results are returned in // ST(0). In fastcall, they're returned in XMM0. // Either way, it's not AX. diff --git a/src/runtime/type.go b/src/runtime/type.go index c11c866cd8..baaaeafc4a 100644 --- a/src/runtime/type.go +++ b/src/runtime/type.go @@ -69,7 +69,7 @@ func (t rtype) pkgpath() string { if u := t.uncommon(); u != nil { return t.nameOff(u.PkgPath).Name() } - switch t.Kind_ & abi.KindMask { + switch t.Kind() { case abi.Struct: st := (*structtype)(unsafe.Pointer(t.Type)) return st.PkgPath.Name() @@ -522,8 +522,8 @@ func typesEqual(t, v *_type, seen map[_typePair]struct{}) bool { if t == v { return true } - kind := t.Kind_ & abi.KindMask - if kind != v.Kind_&abi.KindMask { + kind := t.Kind() + if kind != v.Kind() { return false } rt, rv := toRType(t), toRType(v) diff --git a/src/runtime/typekind.go b/src/runtime/typekind.go deleted file mode 100644 index 4920a7cf14..0000000000 --- a/src/runtime/typekind.go +++ /dev/null @@ -1,12 +0,0 @@ -// Copyright 2014 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package runtime - -import "internal/abi" - -// isDirectIface reports whether t is stored directly in an interface value. -func isDirectIface(t *_type) bool { - return t.Kind_&abi.KindDirectIface != 0 -} -- cgit v1.3-6-g1900 From 2ae059ccaf982c3304fae0b48c1d78ad7192cbdd Mon Sep 17 00:00:00 2001 From: Michael Pratt Date: Fri, 25 Jul 2025 15:35:36 -0400 Subject: all: remove GOEXPERIMENT=swissmap For #54766. 
Change-Id: I6a6a636c40b5fe2e8b0d4a5e23933492bc8bb76e Reviewed-on: https://go-review.googlesource.com/c/go/+/691595 Reviewed-by: Keith Randall Auto-Submit: Michael Pratt LUCI-TryBot-Result: Go LUCI Reviewed-by: Keith Randall --- src/cmd/compile/internal/gc/main.go | 10 +- src/cmd/compile/internal/reflectdata/map.go | 310 ++++ .../compile/internal/reflectdata/map_noswiss.go | 305 ---- src/cmd/compile/internal/reflectdata/map_swiss.go | 310 ---- src/cmd/compile/internal/reflectdata/reflect.go | 13 +- src/cmd/compile/internal/rttype/rttype.go | 6 +- src/cmd/compile/internal/ssagen/ssa.go | 23 +- src/cmd/compile/internal/test/inl_test.go | 10 - .../compile/internal/typecheck/_builtin/runtime.go | 6 +- src/cmd/compile/internal/typecheck/builtin.go | 2 - src/cmd/compile/internal/types/fmt.go | 4 +- src/cmd/compile/internal/types/sizeof_test.go | 2 +- src/cmd/compile/internal/types/type.go | 52 +- src/cmd/compile/internal/walk/builtin.go | 109 +- src/cmd/compile/internal/walk/order.go | 9 +- src/cmd/compile/internal/walk/range.go | 20 +- src/cmd/compile/internal/walk/walk.go | 38 +- src/cmd/link/internal/ld/deadcode.go | 10 +- src/cmd/link/internal/ld/dwarf.go | 131 +- src/cmd/link/internal/ld/dwarf_test.go | 11 +- src/hash/maphash/maphash_runtime.go | 8 +- src/internal/abi/map.go | 64 + src/internal/abi/map_noswiss.go | 54 - src/internal/abi/map_select_noswiss.go | 10 - src/internal/abi/map_select_swiss.go | 22 - src/internal/abi/map_swiss.go | 64 - src/internal/abi/type.go | 12 +- src/internal/buildcfg/exp.go | 1 - src/internal/goexperiment/exp_swissmap_off.go | 8 - src/internal/goexperiment/exp_swissmap_on.go | 8 - src/internal/goexperiment/flags.go | 3 - src/internal/runtime/maps/export_noswiss_test.go | 51 - src/internal/runtime/maps/export_swiss_test.go | 19 - src/internal/runtime/maps/export_test.go | 7 + src/internal/runtime/maps/map.go | 9 +- src/internal/runtime/maps/map_swiss_test.go | 267 --- src/internal/runtime/maps/map_test.go | 249 +++ src/internal/runtime/maps/runtime.go | 338 ++++ src/internal/runtime/maps/runtime_fast32.go | 473 +++++ src/internal/runtime/maps/runtime_fast32_swiss.go | 475 ----- src/internal/runtime/maps/runtime_fast64.go | 512 ++++++ src/internal/runtime/maps/runtime_fast64_swiss.go | 514 ------ src/internal/runtime/maps/runtime_faststr.go | 411 +++++ src/internal/runtime/maps/runtime_faststr_swiss.go | 413 ----- src/internal/runtime/maps/runtime_swiss.go | 352 ---- src/reflect/all_test.go | 8 +- src/reflect/export_noswiss_test.go | 24 - src/reflect/export_swiss_test.go | 12 - src/reflect/export_test.go | 5 + src/reflect/map.go | 449 +++++ src/reflect/map_noswiss.go | 484 ----- src/reflect/map_noswiss_test.go | 60 - src/reflect/map_swiss.go | 460 ----- src/reflect/map_swiss_test.go | 30 - src/reflect/map_test.go | 23 + src/runtime/export_map_noswiss_test.go | 64 - src/runtime/export_map_swiss_test.go | 11 - src/runtime/export_test.go | 3 - src/runtime/linkname_shim.go | 208 +++ src/runtime/linkname_swiss.go | 211 --- src/runtime/map.go | 329 ++++ src/runtime/map_fast32.go | 53 + src/runtime/map_fast32_noswiss.go | 493 ----- src/runtime/map_fast32_swiss.go | 55 - src/runtime/map_fast64.go | 54 + src/runtime/map_fast64_noswiss.go | 502 ------ src/runtime/map_fast64_swiss.go | 56 - src/runtime/map_faststr.go | 42 + src/runtime/map_faststr_noswiss.go | 507 ------ src/runtime/map_faststr_swiss.go | 44 - src/runtime/map_noswiss.go | 1891 -------------------- src/runtime/map_noswiss_test.go | 214 --- src/runtime/map_swiss.go | 349 ---- src/runtime/map_swiss_test.go | 
75 - src/runtime/map_test.go | 126 +- src/runtime/runtime-gdb.py | 41 - src/runtime/runtime-gdb_test.go | 41 +- src/runtime/type.go | 10 +- test/fixedbugs/issue69110.go | 59 - test/fixedbugs/issue70189.go | 2 +- test/live.go | 4 +- test/live2.go | 4 +- test/live_regabi.go | 4 +- 83 files changed, 3677 insertions(+), 9055 deletions(-) create mode 100644 src/cmd/compile/internal/reflectdata/map.go delete mode 100644 src/cmd/compile/internal/reflectdata/map_noswiss.go delete mode 100644 src/cmd/compile/internal/reflectdata/map_swiss.go create mode 100644 src/internal/abi/map.go delete mode 100644 src/internal/abi/map_noswiss.go delete mode 100644 src/internal/abi/map_select_noswiss.go delete mode 100644 src/internal/abi/map_select_swiss.go delete mode 100644 src/internal/abi/map_swiss.go delete mode 100644 src/internal/goexperiment/exp_swissmap_off.go delete mode 100644 src/internal/goexperiment/exp_swissmap_on.go delete mode 100644 src/internal/runtime/maps/export_noswiss_test.go delete mode 100644 src/internal/runtime/maps/export_swiss_test.go delete mode 100644 src/internal/runtime/maps/map_swiss_test.go create mode 100644 src/internal/runtime/maps/runtime_fast32.go delete mode 100644 src/internal/runtime/maps/runtime_fast32_swiss.go create mode 100644 src/internal/runtime/maps/runtime_fast64.go delete mode 100644 src/internal/runtime/maps/runtime_fast64_swiss.go create mode 100644 src/internal/runtime/maps/runtime_faststr.go delete mode 100644 src/internal/runtime/maps/runtime_faststr_swiss.go delete mode 100644 src/internal/runtime/maps/runtime_swiss.go delete mode 100644 src/reflect/export_noswiss_test.go delete mode 100644 src/reflect/export_swiss_test.go create mode 100644 src/reflect/map.go delete mode 100644 src/reflect/map_noswiss.go delete mode 100644 src/reflect/map_noswiss_test.go delete mode 100644 src/reflect/map_swiss.go delete mode 100644 src/reflect/map_swiss_test.go create mode 100644 src/reflect/map_test.go delete mode 100644 src/runtime/export_map_noswiss_test.go delete mode 100644 src/runtime/export_map_swiss_test.go create mode 100644 src/runtime/linkname_shim.go delete mode 100644 src/runtime/linkname_swiss.go create mode 100644 src/runtime/map.go create mode 100644 src/runtime/map_fast32.go delete mode 100644 src/runtime/map_fast32_noswiss.go delete mode 100644 src/runtime/map_fast32_swiss.go create mode 100644 src/runtime/map_fast64.go delete mode 100644 src/runtime/map_fast64_noswiss.go delete mode 100644 src/runtime/map_fast64_swiss.go create mode 100644 src/runtime/map_faststr.go delete mode 100644 src/runtime/map_faststr_noswiss.go delete mode 100644 src/runtime/map_faststr_swiss.go delete mode 100644 src/runtime/map_noswiss.go delete mode 100644 src/runtime/map_noswiss_test.go delete mode 100644 src/runtime/map_swiss.go delete mode 100644 src/runtime/map_swiss_test.go delete mode 100644 test/fixedbugs/issue69110.go (limited to 'src/runtime/export_test.go') diff --git a/src/cmd/compile/internal/gc/main.go b/src/cmd/compile/internal/gc/main.go index 253ec3257a..42e2afaee4 100644 --- a/src/cmd/compile/internal/gc/main.go +++ b/src/cmd/compile/internal/gc/main.go @@ -104,12 +104,10 @@ func Main(archInit func(*ssagen.ArchInfo)) { ir.Pkgs.Runtime = types.NewPkg("go.runtime", "runtime") ir.Pkgs.Runtime.Prefix = "runtime" - if buildcfg.Experiment.SwissMap { - // Pseudo-package that contains the compiler's builtin - // declarations for maps. 
- ir.Pkgs.InternalMaps = types.NewPkg("go.internal/runtime/maps", "internal/runtime/maps") - ir.Pkgs.InternalMaps.Prefix = "internal/runtime/maps" - } + // Pseudo-package that contains the compiler's builtin + // declarations for maps. + ir.Pkgs.InternalMaps = types.NewPkg("go.internal/runtime/maps", "internal/runtime/maps") + ir.Pkgs.InternalMaps.Prefix = "internal/runtime/maps" // pseudo-packages used in symbol tables ir.Pkgs.Itab = types.NewPkg("go.itab", "go.itab") diff --git a/src/cmd/compile/internal/reflectdata/map.go b/src/cmd/compile/internal/reflectdata/map.go new file mode 100644 index 0000000000..54266a604a --- /dev/null +++ b/src/cmd/compile/internal/reflectdata/map.go @@ -0,0 +1,310 @@ +// Copyright 2024 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package reflectdata + +import ( + "cmd/compile/internal/base" + "cmd/compile/internal/ir" + "cmd/compile/internal/rttype" + "cmd/compile/internal/types" + "cmd/internal/obj" + "cmd/internal/objabi" + "cmd/internal/src" + "internal/abi" +) + +// SwissMapGroupType makes the map slot group type given the type of the map. +func SwissMapGroupType(t *types.Type) *types.Type { + if t.MapType().SwissGroup != nil { + return t.MapType().SwissGroup + } + + // Builds a type representing a group structure for the given map type. + // This type is not visible to users, we include it so we can generate + // a correct GC program for it. + // + // Make sure this stays in sync with internal/runtime/maps/group.go. + // + // type group struct { + // ctrl uint64 + // slots [abi.SwissMapGroupSlots]struct { + // key keyType + // elem elemType + // } + // } + + keytype := t.Key() + elemtype := t.Elem() + types.CalcSize(keytype) + types.CalcSize(elemtype) + if keytype.Size() > abi.SwissMapMaxKeyBytes { + keytype = types.NewPtr(keytype) + } + if elemtype.Size() > abi.SwissMapMaxElemBytes { + elemtype = types.NewPtr(elemtype) + } + + slotFields := []*types.Field{ + makefield("key", keytype), + makefield("elem", elemtype), + } + slot := types.NewStruct(slotFields) + slot.SetNoalg(true) + + slotArr := types.NewArray(slot, abi.SwissMapGroupSlots) + slotArr.SetNoalg(true) + + fields := []*types.Field{ + makefield("ctrl", types.Types[types.TUINT64]), + makefield("slots", slotArr), + } + + group := types.NewStruct(fields) + group.SetNoalg(true) + types.CalcSize(group) + + // Check invariants that map code depends on. + if !types.IsComparable(t.Key()) { + base.Fatalf("unsupported map key type for %v", t) + } + if group.Size() <= 8 { + // internal/runtime/maps creates pointers to slots, even if + // both key and elem are size zero. In this case, each slot is + // size 0, but group should still reserve a word of padding at + // the end to ensure pointers are valid. + base.Fatalf("bad group size for %v", t) + } + if t.Key().Size() > abi.SwissMapMaxKeyBytes && !keytype.IsPtr() { + base.Fatalf("key indirect incorrect for %v", t) + } + if t.Elem().Size() > abi.SwissMapMaxElemBytes && !elemtype.IsPtr() { + base.Fatalf("elem indirect incorrect for %v", t) + } + + t.MapType().SwissGroup = group + group.StructType().Map = t + return group +} + +var cachedSwissTableType *types.Type + +// swissTableType returns a type interchangeable with internal/runtime/maps.table. +// Make sure this stays in sync with internal/runtime/maps/table.go. 
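+//
+// Worked size check (editor's arithmetic, 64-bit): the three uint16
+// counters take 3*2 = 6 bytes, localDepth plus one padding byte takes
+// 2, index and groups_data take 2*PtrSize = 16, and the uint64
+// groups_lengthMask takes 8, matching the 32-byte assertion in the
+// function body (24 bytes on 32-bit platforms).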
+func swissTableType() *types.Type { + if cachedSwissTableType != nil { + return cachedSwissTableType + } + + // type table struct { + // used uint16 + // capacity uint16 + // growthLeft uint16 + // localDepth uint8 + // // N.B Padding + // + // index int + // + // // From groups. + // groups_data unsafe.Pointer + // groups_lengthMask uint64 + // } + // must match internal/runtime/maps/table.go:table. + fields := []*types.Field{ + makefield("used", types.Types[types.TUINT16]), + makefield("capacity", types.Types[types.TUINT16]), + makefield("growthLeft", types.Types[types.TUINT16]), + makefield("localDepth", types.Types[types.TUINT8]), + makefield("index", types.Types[types.TINT]), + makefield("groups_data", types.Types[types.TUNSAFEPTR]), + makefield("groups_lengthMask", types.Types[types.TUINT64]), + } + + n := ir.NewDeclNameAt(src.NoXPos, ir.OTYPE, ir.Pkgs.InternalMaps.Lookup("table")) + table := types.NewNamed(n) + n.SetType(table) + n.SetTypecheck(1) + + table.SetUnderlying(types.NewStruct(fields)) + types.CalcSize(table) + + // The size of table should be 32 bytes on 64 bit + // and 24 bytes on 32 bit platforms. + if size := int64(3*2 + 2*1 /* one extra for padding */ + 1*8 + 2*types.PtrSize); table.Size() != size { + base.Fatalf("internal/runtime/maps.table size not correct: got %d, want %d", table.Size(), size) + } + + cachedSwissTableType = table + return table +} + +var cachedSwissMapType *types.Type + +// SwissMapType returns a type interchangeable with internal/runtime/maps.Map. +// Make sure this stays in sync with internal/runtime/maps/map.go. +func SwissMapType() *types.Type { + if cachedSwissMapType != nil { + return cachedSwissMapType + } + + // type Map struct { + // used uint64 + // seed uintptr + // + // dirPtr unsafe.Pointer + // dirLen int + // + // globalDepth uint8 + // globalShift uint8 + // + // writing uint8 + // tombstonePossible bool + // // N.B Padding + // + // clearSeq uint64 + // } + // must match internal/runtime/maps/map.go:Map. + fields := []*types.Field{ + makefield("used", types.Types[types.TUINT64]), + makefield("seed", types.Types[types.TUINTPTR]), + makefield("dirPtr", types.Types[types.TUNSAFEPTR]), + makefield("dirLen", types.Types[types.TINT]), + makefield("globalDepth", types.Types[types.TUINT8]), + makefield("globalShift", types.Types[types.TUINT8]), + makefield("writing", types.Types[types.TUINT8]), + makefield("tombstonePossible", types.Types[types.TBOOL]), + makefield("clearSeq", types.Types[types.TUINT64]), + } + + n := ir.NewDeclNameAt(src.NoXPos, ir.OTYPE, ir.Pkgs.InternalMaps.Lookup("Map")) + m := types.NewNamed(n) + n.SetType(m) + n.SetTypecheck(1) + + m.SetUnderlying(types.NewStruct(fields)) + types.CalcSize(m) + + // The size of Map should be 48 bytes on 64 bit + // and 32 bytes on 32 bit platforms. + if size := int64(2*8 + 4*types.PtrSize /* one extra for globalDepth/globalShift/writing + padding */); m.Size() != size { + base.Fatalf("internal/runtime/maps.Map size not correct: got %d, want %d", m.Size(), size) + } + + cachedSwissMapType = m + return m +} + +var cachedSwissIterType *types.Type + +// SwissMapIterType returns a type interchangeable with runtime.hiter. +// Make sure this stays in sync with runtime/map.go. 
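+//
+// Worked size check (editor's arithmetic, 64-bit): key, elem, typ, m,
+// dirIdx, tab, and group are seven pointer-sized words, globalDepth
+// pads out an eighth (8*PtrSize = 64), and the four uint64 fields
+// (groupSlotOffset, dirOffset, clearSeq, entryIdx) add 4*8 = 32,
+// giving the 96 bytes asserted in the function body.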
+func SwissMapIterType() *types.Type { + if cachedSwissIterType != nil { + return cachedSwissIterType + } + + // type Iter struct { + // key unsafe.Pointer // *Key + // elem unsafe.Pointer // *Elem + // typ unsafe.Pointer // *SwissMapType + // m *Map + // + // groupSlotOffset uint64 + // dirOffset uint64 + // + // clearSeq uint64 + // + // globalDepth uint8 + // // N.B. padding + // + // dirIdx int + // + // tab *table + // + // group unsafe.Pointer // actually groupReference.data + // + // entryIdx uint64 + // } + // must match internal/runtime/maps/table.go:Iter. + fields := []*types.Field{ + makefield("key", types.Types[types.TUNSAFEPTR]), // Used in range.go for TMAP. + makefield("elem", types.Types[types.TUNSAFEPTR]), // Used in range.go for TMAP. + makefield("typ", types.Types[types.TUNSAFEPTR]), + makefield("m", types.NewPtr(SwissMapType())), + makefield("groupSlotOffset", types.Types[types.TUINT64]), + makefield("dirOffset", types.Types[types.TUINT64]), + makefield("clearSeq", types.Types[types.TUINT64]), + makefield("globalDepth", types.Types[types.TUINT8]), + makefield("dirIdx", types.Types[types.TINT]), + makefield("tab", types.NewPtr(swissTableType())), + makefield("group", types.Types[types.TUNSAFEPTR]), + makefield("entryIdx", types.Types[types.TUINT64]), + } + + // build iterator struct holding the above fields + n := ir.NewDeclNameAt(src.NoXPos, ir.OTYPE, ir.Pkgs.InternalMaps.Lookup("Iter")) + iter := types.NewNamed(n) + n.SetType(iter) + n.SetTypecheck(1) + + iter.SetUnderlying(types.NewStruct(fields)) + types.CalcSize(iter) + + // The size of Iter should be 96 bytes on 64 bit + // and 64 bytes on 32 bit platforms. + if size := 8*types.PtrSize /* one extra for globalDepth + padding */ + 4*8; iter.Size() != int64(size) { + base.Fatalf("internal/runtime/maps.Iter size not correct: got %d, want %d", iter.Size(), size) + } + + cachedSwissIterType = iter + return iter +} + +func writeSwissMapType(t *types.Type, lsym *obj.LSym, c rttype.Cursor) { + // internal/abi.SwissMapType + gtyp := SwissMapGroupType(t) + s1 := writeType(t.Key()) + s2 := writeType(t.Elem()) + s3 := writeType(gtyp) + hasher := genhash(t.Key()) + + slotTyp := gtyp.Field(1).Type.Elem() + elemOff := slotTyp.Field(1).Offset + if AlgType(t.Key()) == types.AMEM64 && elemOff != 8 { + base.Fatalf("runtime assumes elemOff for 8-byte keys is 8, got %d", elemOff) + } + if AlgType(t.Key()) == types.ASTRING && elemOff != int64(2*types.PtrSize) { + base.Fatalf("runtime assumes elemOff for string keys is %d, got %d", 2*types.PtrSize, elemOff) + } + + c.Field("Key").WritePtr(s1) + c.Field("Elem").WritePtr(s2) + c.Field("Group").WritePtr(s3) + c.Field("Hasher").WritePtr(hasher) + c.Field("GroupSize").WriteUintptr(uint64(gtyp.Size())) + c.Field("SlotSize").WriteUintptr(uint64(slotTyp.Size())) + c.Field("ElemOff").WriteUintptr(uint64(elemOff)) + var flags uint32 + if needkeyupdate(t.Key()) { + flags |= abi.SwissMapNeedKeyUpdate + } + if hashMightPanic(t.Key()) { + flags |= abi.SwissMapHashMightPanic + } + if t.Key().Size() > abi.SwissMapMaxKeyBytes { + flags |= abi.SwissMapIndirectKey + } + if t.Elem().Size() > abi.SwissMapMaxKeyBytes { + flags |= abi.SwissMapIndirectElem + } + c.Field("Flags").WriteUint32(flags) + + if u := t.Underlying(); u != t { + // If t is a named map type, also keep the underlying map + // type live in the binary. This is important to make sure that + // a named map and that same map cast to its underlying type via + // reflection, use the same hash function. See issue 37716. 
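+ // (Illustrative note: R_KEEP emits no data; it only marks the
+ // referenced symbol as reachable whenever this one is, so the
+ // linker's dead-code elimination cannot drop the underlying
+ // type's descriptor.)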
+ lsym.AddRel(base.Ctxt, obj.Reloc{Type: objabi.R_KEEP, Sym: writeType(u)}) + } +} diff --git a/src/cmd/compile/internal/reflectdata/map_noswiss.go b/src/cmd/compile/internal/reflectdata/map_noswiss.go deleted file mode 100644 index a6fab4cbac..0000000000 --- a/src/cmd/compile/internal/reflectdata/map_noswiss.go +++ /dev/null @@ -1,305 +0,0 @@ -// Copyright 2024 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package reflectdata - -import ( - "internal/abi" - - "cmd/compile/internal/base" - "cmd/compile/internal/ir" - "cmd/compile/internal/rttype" - "cmd/compile/internal/types" - "cmd/internal/obj" - "cmd/internal/objabi" - "cmd/internal/src" -) - -// OldMapBucketType makes the map bucket type given the type of the map. -func OldMapBucketType(t *types.Type) *types.Type { - // Builds a type representing a Bucket structure for - // the given map type. This type is not visible to users - - // we include only enough information to generate a correct GC - // program for it. - // Make sure this stays in sync with runtime/map.go. - // - // A "bucket" is a "struct" { - // tophash [abi.OldMapBucketCount]uint8 - // keys [abi.OldMapBucketCount]keyType - // elems [abi.OldMapBucketCount]elemType - // overflow *bucket - // } - if t.MapType().OldBucket != nil { - return t.MapType().OldBucket - } - - keytype := t.Key() - elemtype := t.Elem() - types.CalcSize(keytype) - types.CalcSize(elemtype) - if keytype.Size() > abi.OldMapMaxKeyBytes { - keytype = types.NewPtr(keytype) - } - if elemtype.Size() > abi.OldMapMaxElemBytes { - elemtype = types.NewPtr(elemtype) - } - - field := make([]*types.Field, 0, 5) - - // The first field is: uint8 topbits[BUCKETSIZE]. - arr := types.NewArray(types.Types[types.TUINT8], abi.OldMapBucketCount) - field = append(field, makefield("topbits", arr)) - - arr = types.NewArray(keytype, abi.OldMapBucketCount) - arr.SetNoalg(true) - keys := makefield("keys", arr) - field = append(field, keys) - - arr = types.NewArray(elemtype, abi.OldMapBucketCount) - arr.SetNoalg(true) - elems := makefield("elems", arr) - field = append(field, elems) - - // If keys and elems have no pointers, the map implementation - // can keep a list of overflow pointers on the side so that - // buckets can be marked as having no pointers. - // Arrange for the bucket to have no pointers by changing - // the type of the overflow field to uintptr in this case. - // See comment on hmap.overflow in runtime/map.go. - otyp := types.Types[types.TUNSAFEPTR] - if !elemtype.HasPointers() && !keytype.HasPointers() { - otyp = types.Types[types.TUINTPTR] - } - overflow := makefield("overflow", otyp) - field = append(field, overflow) - - // link up fields - bucket := types.NewStruct(field[:]) - bucket.SetNoalg(true) - types.CalcSize(bucket) - - // Check invariants that map code depends on. 
- if !types.IsComparable(t.Key()) { - base.Fatalf("unsupported map key type for %v", t) - } - if abi.OldMapBucketCount < 8 { - base.Fatalf("bucket size %d too small for proper alignment %d", abi.OldMapBucketCount, 8) - } - if uint8(keytype.Alignment()) > abi.OldMapBucketCount { - base.Fatalf("key align too big for %v", t) - } - if uint8(elemtype.Alignment()) > abi.OldMapBucketCount { - base.Fatalf("elem align %d too big for %v, BUCKETSIZE=%d", elemtype.Alignment(), t, abi.OldMapBucketCount) - } - if keytype.Size() > abi.OldMapMaxKeyBytes { - base.Fatalf("key size too large for %v", t) - } - if elemtype.Size() > abi.OldMapMaxElemBytes { - base.Fatalf("elem size too large for %v", t) - } - if t.Key().Size() > abi.OldMapMaxKeyBytes && !keytype.IsPtr() { - base.Fatalf("key indirect incorrect for %v", t) - } - if t.Elem().Size() > abi.OldMapMaxElemBytes && !elemtype.IsPtr() { - base.Fatalf("elem indirect incorrect for %v", t) - } - if keytype.Size()%keytype.Alignment() != 0 { - base.Fatalf("key size not a multiple of key align for %v", t) - } - if elemtype.Size()%elemtype.Alignment() != 0 { - base.Fatalf("elem size not a multiple of elem align for %v", t) - } - if uint8(bucket.Alignment())%uint8(keytype.Alignment()) != 0 { - base.Fatalf("bucket align not multiple of key align %v", t) - } - if uint8(bucket.Alignment())%uint8(elemtype.Alignment()) != 0 { - base.Fatalf("bucket align not multiple of elem align %v", t) - } - if keys.Offset%keytype.Alignment() != 0 { - base.Fatalf("bad alignment of keys in bmap for %v", t) - } - if elems.Offset%elemtype.Alignment() != 0 { - base.Fatalf("bad alignment of elems in bmap for %v", t) - } - - // Double-check that overflow field is final memory in struct, - // with no padding at end. - if overflow.Offset != bucket.Size()-int64(types.PtrSize) { - base.Fatalf("bad offset of overflow in bmap for %v, overflow.Offset=%d, bucket.Size()-int64(types.PtrSize)=%d", - t, overflow.Offset, bucket.Size()-int64(types.PtrSize)) - } - - t.MapType().OldBucket = bucket - - bucket.StructType().Map = t - return bucket -} - -var oldHmapType *types.Type - -// OldMapType returns a type interchangeable with runtime.hmap. -// Make sure this stays in sync with runtime/map.go. -func OldMapType() *types.Type { - if oldHmapType != nil { - return oldHmapType - } - - // build a struct: - // type hmap struct { - // count int - // flags uint8 - // B uint8 - // noverflow uint16 - // hash0 uint32 - // buckets unsafe.Pointer - // oldbuckets unsafe.Pointer - // nevacuate uintptr - // clearSeq uint64 - // extra unsafe.Pointer // *mapextra - // } - // must match runtime/map.go:hmap. - fields := []*types.Field{ - makefield("count", types.Types[types.TINT]), - makefield("flags", types.Types[types.TUINT8]), - makefield("B", types.Types[types.TUINT8]), - makefield("noverflow", types.Types[types.TUINT16]), - makefield("hash0", types.Types[types.TUINT32]), // Used in walk.go for OMAKEMAP. - makefield("buckets", types.Types[types.TUNSAFEPTR]), // Used in walk.go for OMAKEMAP. - makefield("oldbuckets", types.Types[types.TUNSAFEPTR]), - makefield("nevacuate", types.Types[types.TUINTPTR]), - makefield("clearSeq", types.Types[types.TUINT64]), - makefield("extra", types.Types[types.TUNSAFEPTR]), - } - - n := ir.NewDeclNameAt(src.NoXPos, ir.OTYPE, ir.Pkgs.Runtime.Lookup("hmap")) - hmap := types.NewNamed(n) - n.SetType(hmap) - n.SetTypecheck(1) - - hmap.SetUnderlying(types.NewStruct(fields)) - types.CalcSize(hmap) - - // The size of hmap should be 56 bytes on 64 bit - // and 36 bytes on 32 bit platforms. 
- if size := int64(2*8 + 5*types.PtrSize); hmap.Size() != size { - base.Fatalf("hmap size not correct: got %d, want %d", hmap.Size(), size) - } - - oldHmapType = hmap - return hmap -} - -var oldHiterType *types.Type - -// OldMapIterType returns a type interchangeable with runtime.hiter. -// Make sure this stays in sync with runtime/map.go. -func OldMapIterType() *types.Type { - if oldHiterType != nil { - return oldHiterType - } - - hmap := OldMapType() - - // build a struct: - // type hiter struct { - // key unsafe.Pointer // *Key - // elem unsafe.Pointer // *Elem - // t unsafe.Pointer // *OldMapType - // h *hmap - // buckets unsafe.Pointer - // bptr unsafe.Pointer // *bmap - // overflow unsafe.Pointer // *[]*bmap - // oldoverflow unsafe.Pointer // *[]*bmap - // startBucket uintptr - // offset uint8 - // wrapped bool - // B uint8 - // i uint8 - // bucket uintptr - // checkBucket uintptr - // clearSeq uint64 - // } - // must match runtime/map.go:hiter. - fields := []*types.Field{ - makefield("key", types.Types[types.TUNSAFEPTR]), // Used in range.go for TMAP. - makefield("elem", types.Types[types.TUNSAFEPTR]), // Used in range.go for TMAP. - makefield("t", types.Types[types.TUNSAFEPTR]), - makefield("h", types.NewPtr(hmap)), - makefield("buckets", types.Types[types.TUNSAFEPTR]), - makefield("bptr", types.Types[types.TUNSAFEPTR]), - makefield("overflow", types.Types[types.TUNSAFEPTR]), - makefield("oldoverflow", types.Types[types.TUNSAFEPTR]), - makefield("startBucket", types.Types[types.TUINTPTR]), - makefield("offset", types.Types[types.TUINT8]), - makefield("wrapped", types.Types[types.TBOOL]), - makefield("B", types.Types[types.TUINT8]), - makefield("i", types.Types[types.TUINT8]), - makefield("bucket", types.Types[types.TUINTPTR]), - makefield("checkBucket", types.Types[types.TUINTPTR]), - makefield("clearSeq", types.Types[types.TUINT64]), - } - - // build iterator struct holding the above fields - n := ir.NewDeclNameAt(src.NoXPos, ir.OTYPE, ir.Pkgs.Runtime.Lookup("hiter")) - hiter := types.NewNamed(n) - n.SetType(hiter) - n.SetTypecheck(1) - - hiter.SetUnderlying(types.NewStruct(fields)) - types.CalcSize(hiter) - if hiter.Size() != int64(8+12*types.PtrSize) { - base.Fatalf("hash_iter size not correct %d %d", hiter.Size(), 8+12*types.PtrSize) - } - - oldHiterType = hiter - return hiter -} - -func writeOldMapType(t *types.Type, lsym *obj.LSym, c rttype.Cursor) { - // internal/abi.OldMapType - s1 := writeType(t.Key()) - s2 := writeType(t.Elem()) - s3 := writeType(OldMapBucketType(t)) - hasher := genhash(t.Key()) - - c.Field("Key").WritePtr(s1) - c.Field("Elem").WritePtr(s2) - c.Field("Bucket").WritePtr(s3) - c.Field("Hasher").WritePtr(hasher) - var flags uint32 - // Note: flags must match maptype accessors in ../../../../runtime/type.go - // and maptype builder in ../../../../reflect/type.go:MapOf. 
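A note on the flag bits written just below: the write side uses bare literals
and the read side uses the OldMapType accessor methods; the constant names
here are invented purely for illustration:

	package note

	const (
		oldMapIndirectKey    = 1 << 0 // key slot stores *key
		oldMapIndirectElem   = 1 << 1 // elem slot stores *elem
		oldMapReflexiveKey   = 1 << 2 // k == k for all keys
		oldMapNeedKeyUpdate  = 1 << 3 // an overwrite must also update the key
		oldMapHashMightPanic = 1 << 4 // hashing may panic (e.g. interface keys)
	)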
- if t.Key().Size() > abi.OldMapMaxKeyBytes { - c.Field("KeySize").WriteUint8(uint8(types.PtrSize)) - flags |= 1 // indirect key - } else { - c.Field("KeySize").WriteUint8(uint8(t.Key().Size())) - } - - if t.Elem().Size() > abi.OldMapMaxElemBytes { - c.Field("ValueSize").WriteUint8(uint8(types.PtrSize)) - flags |= 2 // indirect value - } else { - c.Field("ValueSize").WriteUint8(uint8(t.Elem().Size())) - } - c.Field("BucketSize").WriteUint16(uint16(OldMapBucketType(t).Size())) - if types.IsReflexive(t.Key()) { - flags |= 4 // reflexive key - } - if needkeyupdate(t.Key()) { - flags |= 8 // need key update - } - if hashMightPanic(t.Key()) { - flags |= 16 // hash might panic - } - c.Field("Flags").WriteUint32(flags) - - if u := t.Underlying(); u != t { - // If t is a named map type, also keep the underlying map - // type live in the binary. This is important to make sure that - // a named map and that same map cast to its underlying type via - // reflection, use the same hash function. See issue 37716. - lsym.AddRel(base.Ctxt, obj.Reloc{Type: objabi.R_KEEP, Sym: writeType(u)}) - } -} diff --git a/src/cmd/compile/internal/reflectdata/map_swiss.go b/src/cmd/compile/internal/reflectdata/map_swiss.go deleted file mode 100644 index 54266a604a..0000000000 --- a/src/cmd/compile/internal/reflectdata/map_swiss.go +++ /dev/null @@ -1,310 +0,0 @@ -// Copyright 2024 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package reflectdata - -import ( - "cmd/compile/internal/base" - "cmd/compile/internal/ir" - "cmd/compile/internal/rttype" - "cmd/compile/internal/types" - "cmd/internal/obj" - "cmd/internal/objabi" - "cmd/internal/src" - "internal/abi" -) - -// SwissMapGroupType makes the map slot group type given the type of the map. -func SwissMapGroupType(t *types.Type) *types.Type { - if t.MapType().SwissGroup != nil { - return t.MapType().SwissGroup - } - - // Builds a type representing a group structure for the given map type. - // This type is not visible to users, we include it so we can generate - // a correct GC program for it. - // - // Make sure this stays in sync with internal/runtime/maps/group.go. - // - // type group struct { - // ctrl uint64 - // slots [abi.SwissMapGroupSlots]struct { - // key keyType - // elem elemType - // } - // } - - keytype := t.Key() - elemtype := t.Elem() - types.CalcSize(keytype) - types.CalcSize(elemtype) - if keytype.Size() > abi.SwissMapMaxKeyBytes { - keytype = types.NewPtr(keytype) - } - if elemtype.Size() > abi.SwissMapMaxElemBytes { - elemtype = types.NewPtr(elemtype) - } - - slotFields := []*types.Field{ - makefield("key", keytype), - makefield("elem", elemtype), - } - slot := types.NewStruct(slotFields) - slot.SetNoalg(true) - - slotArr := types.NewArray(slot, abi.SwissMapGroupSlots) - slotArr.SetNoalg(true) - - fields := []*types.Field{ - makefield("ctrl", types.Types[types.TUINT64]), - makefield("slots", slotArr), - } - - group := types.NewStruct(fields) - group.SetNoalg(true) - types.CalcSize(group) - - // Check invariants that map code depends on. - if !types.IsComparable(t.Key()) { - base.Fatalf("unsupported map key type for %v", t) - } - if group.Size() <= 8 { - // internal/runtime/maps creates pointers to slots, even if - // both key and elem are size zero. In this case, each slot is - // size 0, but group should still reserve a word of padding at - // the end to ensure pointers are valid. 
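That padding behavior is observable in ordinary Go: a struct whose trailing
field is zero-sized gets padded so that interior pointers cannot point one
past the allocation. A quick sketch (64-bit assumed):

	package main

	import (
		"fmt"
		"unsafe"
	)

	type zeroSlotGroup struct {
		ctrl  uint64
		slots [8]struct{} // key and elem both size zero
	}

	func main() {
		// Prints 16, not 8: the compiler reserves padding after the
		// zero-size array so &g.slots[i] stays inside the object.
		fmt.Println(unsafe.Sizeof(zeroSlotGroup{}))
	}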
- base.Fatalf("bad group size for %v", t) - } - if t.Key().Size() > abi.SwissMapMaxKeyBytes && !keytype.IsPtr() { - base.Fatalf("key indirect incorrect for %v", t) - } - if t.Elem().Size() > abi.SwissMapMaxElemBytes && !elemtype.IsPtr() { - base.Fatalf("elem indirect incorrect for %v", t) - } - - t.MapType().SwissGroup = group - group.StructType().Map = t - return group -} - -var cachedSwissTableType *types.Type - -// swissTableType returns a type interchangeable with internal/runtime/maps.table. -// Make sure this stays in sync with internal/runtime/maps/table.go. -func swissTableType() *types.Type { - if cachedSwissTableType != nil { - return cachedSwissTableType - } - - // type table struct { - // used uint16 - // capacity uint16 - // growthLeft uint16 - // localDepth uint8 - // // N.B Padding - // - // index int - // - // // From groups. - // groups_data unsafe.Pointer - // groups_lengthMask uint64 - // } - // must match internal/runtime/maps/table.go:table. - fields := []*types.Field{ - makefield("used", types.Types[types.TUINT16]), - makefield("capacity", types.Types[types.TUINT16]), - makefield("growthLeft", types.Types[types.TUINT16]), - makefield("localDepth", types.Types[types.TUINT8]), - makefield("index", types.Types[types.TINT]), - makefield("groups_data", types.Types[types.TUNSAFEPTR]), - makefield("groups_lengthMask", types.Types[types.TUINT64]), - } - - n := ir.NewDeclNameAt(src.NoXPos, ir.OTYPE, ir.Pkgs.InternalMaps.Lookup("table")) - table := types.NewNamed(n) - n.SetType(table) - n.SetTypecheck(1) - - table.SetUnderlying(types.NewStruct(fields)) - types.CalcSize(table) - - // The size of table should be 32 bytes on 64 bit - // and 24 bytes on 32 bit platforms. - if size := int64(3*2 + 2*1 /* one extra for padding */ + 1*8 + 2*types.PtrSize); table.Size() != size { - base.Fatalf("internal/runtime/maps.table size not correct: got %d, want %d", table.Size(), size) - } - - cachedSwissTableType = table - return table -} - -var cachedSwissMapType *types.Type - -// SwissMapType returns a type interchangeable with internal/runtime/maps.Map. -// Make sure this stays in sync with internal/runtime/maps/map.go. -func SwissMapType() *types.Type { - if cachedSwissMapType != nil { - return cachedSwissMapType - } - - // type Map struct { - // used uint64 - // seed uintptr - // - // dirPtr unsafe.Pointer - // dirLen int - // - // globalDepth uint8 - // globalShift uint8 - // - // writing uint8 - // tombstonePossible bool - // // N.B Padding - // - // clearSeq uint64 - // } - // must match internal/runtime/maps/map.go:Map. - fields := []*types.Field{ - makefield("used", types.Types[types.TUINT64]), - makefield("seed", types.Types[types.TUINTPTR]), - makefield("dirPtr", types.Types[types.TUNSAFEPTR]), - makefield("dirLen", types.Types[types.TINT]), - makefield("globalDepth", types.Types[types.TUINT8]), - makefield("globalShift", types.Types[types.TUINT8]), - makefield("writing", types.Types[types.TUINT8]), - makefield("tombstonePossible", types.Types[types.TBOOL]), - makefield("clearSeq", types.Types[types.TUINT64]), - } - - n := ir.NewDeclNameAt(src.NoXPos, ir.OTYPE, ir.Pkgs.InternalMaps.Lookup("Map")) - m := types.NewNamed(n) - n.SetType(m) - n.SetTypecheck(1) - - m.SetUnderlying(types.NewStruct(fields)) - types.CalcSize(m) - - // The size of Map should be 48 bytes on 64 bit - // and 32 bytes on 32 bit platforms. 
- if size := int64(2*8 + 4*types.PtrSize /* one extra for globalDepth/globalShift/writing + padding */); m.Size() != size { - base.Fatalf("internal/runtime/maps.Map size not correct: got %d, want %d", m.Size(), size) - } - - cachedSwissMapType = m - return m -} - -var cachedSwissIterType *types.Type - -// SwissMapIterType returns a type interchangeable with runtime.hiter. -// Make sure this stays in sync with runtime/map.go. -func SwissMapIterType() *types.Type { - if cachedSwissIterType != nil { - return cachedSwissIterType - } - - // type Iter struct { - // key unsafe.Pointer // *Key - // elem unsafe.Pointer // *Elem - // typ unsafe.Pointer // *SwissMapType - // m *Map - // - // groupSlotOffset uint64 - // dirOffset uint64 - // - // clearSeq uint64 - // - // globalDepth uint8 - // // N.B. padding - // - // dirIdx int - // - // tab *table - // - // group unsafe.Pointer // actually groupReference.data - // - // entryIdx uint64 - // } - // must match internal/runtime/maps/table.go:Iter. - fields := []*types.Field{ - makefield("key", types.Types[types.TUNSAFEPTR]), // Used in range.go for TMAP. - makefield("elem", types.Types[types.TUNSAFEPTR]), // Used in range.go for TMAP. - makefield("typ", types.Types[types.TUNSAFEPTR]), - makefield("m", types.NewPtr(SwissMapType())), - makefield("groupSlotOffset", types.Types[types.TUINT64]), - makefield("dirOffset", types.Types[types.TUINT64]), - makefield("clearSeq", types.Types[types.TUINT64]), - makefield("globalDepth", types.Types[types.TUINT8]), - makefield("dirIdx", types.Types[types.TINT]), - makefield("tab", types.NewPtr(swissTableType())), - makefield("group", types.Types[types.TUNSAFEPTR]), - makefield("entryIdx", types.Types[types.TUINT64]), - } - - // build iterator struct holding the above fields - n := ir.NewDeclNameAt(src.NoXPos, ir.OTYPE, ir.Pkgs.InternalMaps.Lookup("Iter")) - iter := types.NewNamed(n) - n.SetType(iter) - n.SetTypecheck(1) - - iter.SetUnderlying(types.NewStruct(fields)) - types.CalcSize(iter) - - // The size of Iter should be 96 bytes on 64 bit - // and 64 bytes on 32 bit platforms. 
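Likewise for Iter: key, elem, typ, m, tab, and group are six pointers, dirIdx
is pointer-sized, and globalDepth plus padding fills an eighth pointer-sized
word, while groupSlotOffset, dirOffset, clearSeq, and entryIdx are four fixed
uint64s. That gives 8*PtrSize + 4*8, i.e. 96 on 64-bit and 64 on 32-bit,
matching the check below.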
- if size := 8*types.PtrSize /* one extra for globalDepth + padding */ + 4*8; iter.Size() != int64(size) { - base.Fatalf("internal/runtime/maps.Iter size not correct: got %d, want %d", iter.Size(), size) - } - - cachedSwissIterType = iter - return iter -} - -func writeSwissMapType(t *types.Type, lsym *obj.LSym, c rttype.Cursor) { - // internal/abi.SwissMapType - gtyp := SwissMapGroupType(t) - s1 := writeType(t.Key()) - s2 := writeType(t.Elem()) - s3 := writeType(gtyp) - hasher := genhash(t.Key()) - - slotTyp := gtyp.Field(1).Type.Elem() - elemOff := slotTyp.Field(1).Offset - if AlgType(t.Key()) == types.AMEM64 && elemOff != 8 { - base.Fatalf("runtime assumes elemOff for 8-byte keys is 8, got %d", elemOff) - } - if AlgType(t.Key()) == types.ASTRING && elemOff != int64(2*types.PtrSize) { - base.Fatalf("runtime assumes elemOff for string keys is %d, got %d", 2*types.PtrSize, elemOff) - } - - c.Field("Key").WritePtr(s1) - c.Field("Elem").WritePtr(s2) - c.Field("Group").WritePtr(s3) - c.Field("Hasher").WritePtr(hasher) - c.Field("GroupSize").WriteUintptr(uint64(gtyp.Size())) - c.Field("SlotSize").WriteUintptr(uint64(slotTyp.Size())) - c.Field("ElemOff").WriteUintptr(uint64(elemOff)) - var flags uint32 - if needkeyupdate(t.Key()) { - flags |= abi.SwissMapNeedKeyUpdate - } - if hashMightPanic(t.Key()) { - flags |= abi.SwissMapHashMightPanic - } - if t.Key().Size() > abi.SwissMapMaxKeyBytes { - flags |= abi.SwissMapIndirectKey - } - if t.Elem().Size() > abi.SwissMapMaxKeyBytes { - flags |= abi.SwissMapIndirectElem - } - c.Field("Flags").WriteUint32(flags) - - if u := t.Underlying(); u != t { - // If t is a named map type, also keep the underlying map - // type live in the binary. This is important to make sure that - // a named map and that same map cast to its underlying type via - // reflection, use the same hash function. See issue 37716. - lsym.AddRel(base.Ctxt, obj.Reloc{Type: objabi.R_KEEP, Sym: writeType(u)}) - } -} diff --git a/src/cmd/compile/internal/reflectdata/reflect.go b/src/cmd/compile/internal/reflectdata/reflect.go index 62cef16816..360536b427 100644 --- a/src/cmd/compile/internal/reflectdata/reflect.go +++ b/src/cmd/compile/internal/reflectdata/reflect.go @@ -8,7 +8,6 @@ import ( "encoding/binary" "fmt" "internal/abi" - "internal/buildcfg" "slices" "sort" "strings" @@ -773,11 +772,7 @@ func writeType(t *types.Type) *obj.LSym { rt = rttype.InterfaceType dataAdd = len(imethods(t)) * int(rttype.IMethod.Size()) case types.TMAP: - if buildcfg.Experiment.SwissMap { - rt = rttype.SwissMapType - } else { - rt = rttype.OldMapType - } + rt = rttype.MapType case types.TPTR: rt = rttype.PtrType // TODO: use rttype.Type for Elem() is ANY? 
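The elemOff assertions in writeSwissMapType above (8 for 8-byte keys,
2*PtrSize for string keys) fall straight out of the key/elem slot layout; a
sketch with illustrative mirror structs follows. (Noted in passing: the
deleted copy tests t.Elem().Size() against SwissMapMaxKeyBytes when deciding
SwissMapIndirectElem; both constants are 128, so behavior is unaffected.)

	package main

	import (
		"fmt"
		"unsafe"
	)

	// Mirrors of the compiler-generated key/elem slot for two key types.
	type slotInt64 struct {
		key  int64
		elem bool
	}

	type slotString struct {
		key  string // two words: data pointer + length
		elem bool
	}

	func main() {
		var a slotInt64
		var s slotString
		fmt.Println(unsafe.Offsetof(a.elem)) // 8
		fmt.Println(unsafe.Offsetof(s.elem)) // 16 on 64-bit, i.e. 2*PtrSize
	}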
@@ -877,11 +872,7 @@ func writeType(t *types.Type) *obj.LSym { } case types.TMAP: - if buildcfg.Experiment.SwissMap { - writeSwissMapType(t, lsym, c) - } else { - writeOldMapType(t, lsym, c) - } + writeSwissMapType(t, lsym, c) case types.TPTR: // internal/abi.PtrType diff --git a/src/cmd/compile/internal/rttype/rttype.go b/src/cmd/compile/internal/rttype/rttype.go index aaf98dda15..670129c7d5 100644 --- a/src/cmd/compile/internal/rttype/rttype.go +++ b/src/cmd/compile/internal/rttype/rttype.go @@ -27,8 +27,7 @@ var ArrayType *types.Type var ChanType *types.Type var FuncType *types.Type var InterfaceType *types.Type -var OldMapType *types.Type -var SwissMapType *types.Type +var MapType *types.Type var PtrType *types.Type var SliceType *types.Type var StructType *types.Type @@ -55,8 +54,7 @@ func Init() { ChanType = FromReflect(reflect.TypeOf(abi.ChanType{})) FuncType = FromReflect(reflect.TypeOf(abi.FuncType{})) InterfaceType = FromReflect(reflect.TypeOf(abi.InterfaceType{})) - OldMapType = FromReflect(reflect.TypeOf(abi.OldMapType{})) - SwissMapType = FromReflect(reflect.TypeOf(abi.SwissMapType{})) + MapType = FromReflect(reflect.TypeOf(abi.SwissMapType{})) PtrType = FromReflect(reflect.TypeOf(abi.PtrType{})) SliceType = FromReflect(reflect.TypeOf(abi.SliceType{})) StructType = FromReflect(reflect.TypeOf(abi.StructType{})) diff --git a/src/cmd/compile/internal/ssagen/ssa.go b/src/cmd/compile/internal/ssagen/ssa.go index 053bfc3093..2a0b7991ae 100644 --- a/src/cmd/compile/internal/ssagen/ssa.go +++ b/src/cmd/compile/internal/ssagen/ssa.go @@ -94,12 +94,8 @@ func InitConfig() { _ = types.NewPtr(types.Types[types.TINT16]) // *int16 _ = types.NewPtr(types.Types[types.TINT64]) // *int64 _ = types.NewPtr(types.ErrorType) // *error - if buildcfg.Experiment.SwissMap { - _ = types.NewPtr(reflectdata.SwissMapType()) // *internal/runtime/maps.Map - } else { - _ = types.NewPtr(reflectdata.OldMapType()) // *runtime.hmap - } - _ = types.NewPtr(deferstruct()) // *runtime._defer + _ = types.NewPtr(reflectdata.SwissMapType()) // *internal/runtime/maps.Map + _ = types.NewPtr(deferstruct()) // *runtime._defer types.NewPtrCacheEnabled = false ssaConfig = ssa.NewConfig(base.Ctxt.Arch.Name, *types_, base.Ctxt, base.Flag.N == 0, Arch.SoftFloat) ssaConfig.Race = base.Flag.Race @@ -3083,13 +3079,8 @@ func (s *state) exprCheckPtr(n ir.Node, checkPtrOK bool) *ssa.Value { return v } - // map <--> *hmap - var mt *types.Type - if buildcfg.Experiment.SwissMap { - mt = types.NewPtr(reflectdata.SwissMapType()) - } else { - mt = types.NewPtr(reflectdata.OldMapType()) - } + // map <--> *internal/runtime/maps.Map + mt := types.NewPtr(reflectdata.SwissMapType()) if to.Kind() == types.TMAP && from == mt { return v } @@ -5759,13 +5750,13 @@ func (s *state) referenceTypeBuiltin(n *ir.UnaryExpr, x *ssa.Value) *ssa.Value { s.startBlock(bElse) switch n.Op() { case ir.OLEN: - if buildcfg.Experiment.SwissMap && n.X.Type().IsMap() { - // length is stored in the first word. + if n.X.Type().IsMap() { + // length is stored in the first word, but needs conversion to int. loadType := reflectdata.SwissMapType().Field(0).Type // uint64 load := s.load(loadType, x) s.vars[n] = s.conv(nil, load, loadType, lenType) // integer conversion doesn't need Node } else { - // length is stored in the first word for map/chan + // length is stored in the first word for chan, no conversion needed. 
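In ordinary Go terms, the map branch above loads the header's first 8-byte
word (Map.used) and truncates it to int. A hedged equivalent, assuming the
internal/runtime/maps layout in which used uint64 is the first field:

	package maplen

	import "unsafe"

	// mapLen mimics what the generated code does for len(m) on a map value.
	func mapLen(m unsafe.Pointer) int {
		if m == nil {
			return 0 // the nil case is handled by the surrounding SSA blocks
		}
		return int(*(*uint64)(m))
	}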
s.vars[n] = s.load(lenType, x) } case ir.OCAP: diff --git a/src/cmd/compile/internal/test/inl_test.go b/src/cmd/compile/internal/test/inl_test.go index ace95f78a6..eda6084b48 100644 --- a/src/cmd/compile/internal/test/inl_test.go +++ b/src/cmd/compile/internal/test/inl_test.go @@ -6,7 +6,6 @@ package test import ( "bufio" - "internal/goexperiment" "internal/testenv" "io" "math/bits" @@ -234,15 +233,6 @@ func TestIntendedInlining(t *testing.T) { }, } - if !goexperiment.SwissMap { - // Maps - want["runtime"] = append(want["runtime"], "bucketMask") - want["runtime"] = append(want["runtime"], "bucketShift") - want["runtime"] = append(want["runtime"], "evacuated") - want["runtime"] = append(want["runtime"], "tophash") - want["runtime"] = append(want["runtime"], "(*bmap).keys") - want["runtime"] = append(want["runtime"], "(*bmap).overflow") - } if runtime.GOARCH != "386" && runtime.GOARCH != "loong64" && runtime.GOARCH != "mips64" && runtime.GOARCH != "mips64le" && runtime.GOARCH != "riscv64" { // nextFreeFast calls sys.TrailingZeros64, which on 386 is implemented in asm and is not inlinable. // We currently don't have midstack inlining so nextFreeFast is also not inlinable on 386. diff --git a/src/cmd/compile/internal/typecheck/_builtin/runtime.go b/src/cmd/compile/internal/typecheck/_builtin/runtime.go index 628c7e2fbc..296bfdc281 100644 --- a/src/cmd/compile/internal/typecheck/_builtin/runtime.go +++ b/src/cmd/compile/internal/typecheck/_builtin/runtime.go @@ -152,14 +152,12 @@ func mapassign_fast32ptr(mapType *byte, hmap map[any]any, key unsafe.Pointer) (v func mapassign_fast64(mapType *byte, hmap map[any]any, key uint64) (val *any) func mapassign_fast64ptr(mapType *byte, hmap map[any]any, key unsafe.Pointer) (val *any) func mapassign_faststr(mapType *byte, hmap map[any]any, key string) (val *any) -func mapiterinit(mapType *byte, hmap map[any]any, hiter *any) // old maps -func mapIterStart(mapType *byte, hmap map[any]any, hiter *any) // swiss maps +func mapIterStart(mapType *byte, hmap map[any]any, hiter *any) func mapdelete(mapType *byte, hmap map[any]any, key *any) func mapdelete_fast32(mapType *byte, hmap map[any]any, key uint32) func mapdelete_fast64(mapType *byte, hmap map[any]any, key uint64) func mapdelete_faststr(mapType *byte, hmap map[any]any, key string) -func mapiternext(hiter *any) // old maps -func mapIterNext(hiter *any) // swiss maps +func mapIterNext(hiter *any) func mapclear(mapType *byte, hmap map[any]any) // *byte is really *runtime.Type diff --git a/src/cmd/compile/internal/typecheck/builtin.go b/src/cmd/compile/internal/typecheck/builtin.go index 730ed4146a..535f0fb7e8 100644 --- a/src/cmd/compile/internal/typecheck/builtin.go +++ b/src/cmd/compile/internal/typecheck/builtin.go @@ -130,13 +130,11 @@ var runtimeDecls = [...]struct { {"mapassign_fast64", funcTag, 85}, {"mapassign_fast64ptr", funcTag, 93}, {"mapassign_faststr", funcTag, 86}, - {"mapiterinit", funcTag, 94}, {"mapIterStart", funcTag, 94}, {"mapdelete", funcTag, 94}, {"mapdelete_fast32", funcTag, 95}, {"mapdelete_fast64", funcTag, 96}, {"mapdelete_faststr", funcTag, 97}, - {"mapiternext", funcTag, 98}, {"mapIterNext", funcTag, 98}, {"mapclear", funcTag, 99}, {"makechan64", funcTag, 101}, diff --git a/src/cmd/compile/internal/types/fmt.go b/src/cmd/compile/internal/types/fmt.go index 139defafe2..697c62cbc9 100644 --- a/src/cmd/compile/internal/types/fmt.go +++ b/src/cmd/compile/internal/types/fmt.go @@ -471,11 +471,9 @@ func tconv2(b *bytes.Buffer, t *Type, verb rune, mode fmtMode, visited map[*Type case 
TSTRUCT: if m := t.StructType().Map; m != nil { mt := m.MapType() - // Format the bucket struct for map[x]y as map.bucket[x]y. + // Format the bucket struct for map[x]y as map.group[x]y. // This avoids a recursive print that generates very long names. switch t { - case mt.OldBucket: - b.WriteString("map.bucket[") case mt.SwissGroup: b.WriteString("map.group[") default: diff --git a/src/cmd/compile/internal/types/sizeof_test.go b/src/cmd/compile/internal/types/sizeof_test.go index 3b2aeece3e..ba033ec499 100644 --- a/src/cmd/compile/internal/types/sizeof_test.go +++ b/src/cmd/compile/internal/types/sizeof_test.go @@ -22,7 +22,7 @@ func TestSizeof(t *testing.T) { }{ {Sym{}, 32, 64}, {Type{}, 60, 96}, - {Map{}, 16, 32}, + {Map{}, 12, 24}, {Forward{}, 20, 32}, {Func{}, 32, 56}, {Struct{}, 12, 24}, diff --git a/src/cmd/compile/internal/types/type.go b/src/cmd/compile/internal/types/type.go index c4080ed0b5..47bffb68f4 100644 --- a/src/cmd/compile/internal/types/type.go +++ b/src/cmd/compile/internal/types/type.go @@ -10,7 +10,6 @@ import ( "cmd/internal/src" "fmt" "go/constant" - "internal/buildcfg" "internal/types/errors" "sync" ) @@ -281,16 +280,6 @@ type Map struct { Key *Type // Key type Elem *Type // Val (elem) type - // Note: It would be cleaner to completely split Map into OldMap and - // SwissMap, but 99% of the types map code doesn't care about the - // implementation at all, so it is tons of churn to split the type. - // Only code that looks at the bucket field can care about the - // implementation. - - // GOEXPERIMENT=noswissmap fields - OldBucket *Type // internal struct type representing a hash bucket - - // GOEXPERIMENT=swissmap fields SwissGroup *Type // internal struct type representing a slot group } @@ -1189,37 +1178,20 @@ func (t *Type) cmp(x *Type) Cmp { // by the general code after the switch. case TSTRUCT: - if buildcfg.Experiment.SwissMap { - // Is this a map group type? - if t.StructType().Map == nil { - if x.StructType().Map != nil { - return CMPlt // nil < non-nil - } - // to the fallthrough - } else if x.StructType().Map == nil { - return CMPgt // nil > non-nil - } - // Both have non-nil Map, fallthrough to the general - // case. Note that the map type does not directly refer - // to the group type (it uses unsafe.Pointer). If it - // did, this would need special handling to avoid - // infinite recursion. - } else { - // Is this a map bucket type? - if t.StructType().Map == nil { - if x.StructType().Map != nil { - return CMPlt // nil < non-nil - } - // to the fallthrough - } else if x.StructType().Map == nil { - return CMPgt // nil > non-nil + // Is this a map group type? + if t.StructType().Map == nil { + if x.StructType().Map != nil { + return CMPlt // nil < non-nil } - // Both have non-nil Map, fallthrough to the general - // case. Note that the map type does not directly refer - // to the bucket type (it uses unsafe.Pointer). If it - // did, this would need special handling to avoid - // infinite recursion. + // to the general case + } else if x.StructType().Map == nil { + return CMPgt // nil > non-nil } + // Both have non-nil Map, fallthrough to the general + // case. Note that the map type does not directly refer + // to the group type (it uses unsafe.Pointer). If it + // did, this would need special handling to avoid + // infinite recursion. 
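To spell out the recursion risk the comment refers to: comparing a map type
could descend into its group struct, and the group struct's StructType().Map
points back at the map type. That edge is never followed because the map
representation reaches its groups through an unsafe.Pointer (dirPtr) rather
than through a field typed as the group struct.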
tfs := t.Fields() xfs := x.Fields() diff --git a/src/cmd/compile/internal/walk/builtin.go b/src/cmd/compile/internal/walk/builtin.go index 3b5985d130..1cd3126ce1 100644 --- a/src/cmd/compile/internal/walk/builtin.go +++ b/src/cmd/compile/internal/walk/builtin.go @@ -9,7 +9,6 @@ import ( "go/constant" "go/token" "internal/abi" - "internal/buildcfg" "strings" "cmd/compile/internal/base" @@ -313,13 +312,6 @@ func walkMakeChan(n *ir.MakeExpr, init *ir.Nodes) ir.Node { // walkMakeMap walks an OMAKEMAP node. func walkMakeMap(n *ir.MakeExpr, init *ir.Nodes) ir.Node { - if buildcfg.Experiment.SwissMap { - return walkMakeSwissMap(n, init) - } - return walkMakeOldMap(n, init) -} - -func walkMakeSwissMap(n *ir.MakeExpr, init *ir.Nodes) ir.Node { t := n.Type() mapType := reflectdata.SwissMapType() hint := n.Len @@ -366,12 +358,12 @@ func walkMakeSwissMap(n *ir.MakeExpr, init *ir.Nodes) ir.Node { empty := ir.NewBasicLit(base.Pos, types.UntypedInt, constant.MakeUint64(abi.SwissMapCtrlEmpty)) // g.ctrl = abi.SwissMapCtrlEmpty - csym := groupType.Field(0).Sym // g.ctrl see reflectdata/map_swiss.go + csym := groupType.Field(0).Sym // g.ctrl see reflectdata/map.go ca := ir.NewAssignStmt(base.Pos, ir.NewSelectorExpr(base.Pos, ir.ODOT, g, csym), empty) nif.Body.Append(ca) // m.dirPtr = g - dsym := mapType.Field(2).Sym // m.dirPtr see reflectdata/map_swiss.go + dsym := mapType.Field(2).Sym // m.dirPtr see reflectdata/map.go na := ir.NewAssignStmt(base.Pos, ir.NewSelectorExpr(base.Pos, ir.ODOT, m, dsym), typecheck.ConvNop(g, types.Types[types.TUNSAFEPTR])) nif.Body.Append(na) appendWalkStmt(init, nif) @@ -391,7 +383,7 @@ func walkMakeSwissMap(n *ir.MakeExpr, init *ir.Nodes) ir.Node { // m map has been allocated on the stack already. // m.seed = uintptr(rand()) rand := mkcall("rand", types.Types[types.TUINT64], init) - seedSym := mapType.Field(1).Sym // m.seed see reflectdata/map_swiss.go + seedSym := mapType.Field(1).Sym // m.seed see reflectdata/map.go appendWalkStmt(init, ir.NewAssignStmt(base.Pos, ir.NewSelectorExpr(base.Pos, ir.ODOT, m, seedSym), typecheck.Conv(rand, types.Types[types.TUINTPTR]))) return typecheck.ConvNop(m, t) } @@ -428,101 +420,6 @@ func walkMakeSwissMap(n *ir.MakeExpr, init *ir.Nodes) ir.Node { return mkcall1(fn, n.Type(), init, reflectdata.MakeMapRType(base.Pos, n), typecheck.Conv(hint, argtype), m) } -func walkMakeOldMap(n *ir.MakeExpr, init *ir.Nodes) ir.Node { - t := n.Type() - hmapType := reflectdata.OldMapType() - hint := n.Len - - // var h *hmap - var h ir.Node - if n.Esc() == ir.EscNone { - // Allocate hmap on stack. - - // var hv hmap - // h = &hv - h = stackTempAddr(init, hmapType) - - // Allocate one bucket pointed to by hmap.buckets on stack if hint - // is not larger than BUCKETSIZE. In case hint is larger than - // BUCKETSIZE runtime.makemap will allocate the buckets on the heap. - // Maximum key and elem size is 128 bytes, larger objects - // are stored with an indirection. So max bucket size is 2048+eps. 
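The "2048+eps" bound works out as a single worked constant (64-bit pointers
assumed):

	package note

	const maxOldBucketBytes = 8 + // tophash
		8*128 + // keys, each at most OldMapMaxKeyBytes inline
		8*128 + // elems, each at most OldMapMaxElemBytes inline
		8 // overflow pointer
	// = 2064, i.e. 2048 plus a small epsilon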
- if !ir.IsConst(hint, constant.Int) || - constant.Compare(hint.Val(), token.LEQ, constant.MakeInt64(abi.OldMapBucketCount)) { - - // In case hint is larger than BUCKETSIZE runtime.makemap - // will allocate the buckets on the heap, see #20184 - // - // if hint <= BUCKETSIZE { - // var bv bmap - // b = &bv - // h.buckets = b - // } - - nif := ir.NewIfStmt(base.Pos, ir.NewBinaryExpr(base.Pos, ir.OLE, hint, ir.NewInt(base.Pos, abi.OldMapBucketCount)), nil, nil) - nif.Likely = true - - // var bv bmap - // b = &bv - b := stackTempAddr(&nif.Body, reflectdata.OldMapBucketType(t)) - - // h.buckets = b - bsym := hmapType.Field(5).Sym // hmap.buckets see reflect.go:hmap - na := ir.NewAssignStmt(base.Pos, ir.NewSelectorExpr(base.Pos, ir.ODOT, h, bsym), typecheck.ConvNop(b, types.Types[types.TUNSAFEPTR])) - nif.Body.Append(na) - appendWalkStmt(init, nif) - } - } - - if ir.IsConst(hint, constant.Int) && constant.Compare(hint.Val(), token.LEQ, constant.MakeInt64(abi.OldMapBucketCount)) { - // Handling make(map[any]any) and - // make(map[any]any, hint) where hint <= BUCKETSIZE - // special allows for faster map initialization and - // improves binary size by using calls with fewer arguments. - // For hint <= BUCKETSIZE overLoadFactor(hint, 0) is false - // and no buckets will be allocated by makemap. Therefore, - // no buckets need to be allocated in this code path. - if n.Esc() == ir.EscNone { - // Only need to initialize h.hash0 since - // hmap h has been allocated on the stack already. - // h.hash0 = rand32() - rand := mkcall("rand32", types.Types[types.TUINT32], init) - hashsym := hmapType.Field(4).Sym // hmap.hash0 see reflect.go:hmap - appendWalkStmt(init, ir.NewAssignStmt(base.Pos, ir.NewSelectorExpr(base.Pos, ir.ODOT, h, hashsym), rand)) - return typecheck.ConvNop(h, t) - } - // Call runtime.makemap_small to allocate an - // hmap on the heap and initialize hmap's hash0 field. - fn := typecheck.LookupRuntime("makemap_small", t.Key(), t.Elem()) - return mkcall1(fn, n.Type(), init) - } - - if n.Esc() != ir.EscNone { - h = typecheck.NodNil() - } - // Map initialization with a variable or large hint is - // more complicated. We therefore generate a call to - // runtime.makemap to initialize hmap and allocate the - // map buckets. - - // When hint fits into int, use makemap instead of - // makemap64, which is faster and shorter on 32 bit platforms. - fnname := "makemap64" - argtype := types.Types[types.TINT64] - - // Type checking guarantees that TIDEAL hint is positive and fits in an int. - // See checkmake call in TMAP case of OMAKE case in OpSwitch in typecheck1 function. - // The case of hint overflow when converting TUINT or TUINTPTR to TINT - // will be handled by the negative range checks in makemap during runtime. - if hint.Type().IsKind(types.TIDEAL) || hint.Type().Size() <= types.Types[types.TUINT].Size() { - fnname = "makemap" - argtype = types.Types[types.TINT] - } - - fn := typecheck.LookupRuntime(fnname, hmapType, t.Key(), t.Elem()) - return mkcall1(fn, n.Type(), init, reflectdata.MakeMapRType(base.Pos, n), typecheck.Conv(hint, argtype), h) -} - // walkMakeSlice walks an OMAKESLICE node. 
func walkMakeSlice(n *ir.MakeExpr, init *ir.Nodes) ir.Node { len := n.Len diff --git a/src/cmd/compile/internal/walk/order.go b/src/cmd/compile/internal/walk/order.go index daa7f92617..41498fa018 100644 --- a/src/cmd/compile/internal/walk/order.go +++ b/src/cmd/compile/internal/walk/order.go @@ -8,7 +8,6 @@ import ( "fmt" "go/constant" "internal/abi" - "internal/buildcfg" "cmd/compile/internal/base" "cmd/compile/internal/ir" @@ -967,12 +966,8 @@ func (o *orderState) stmt(n ir.Node) { n.X = o.copyExpr(r) // n.Prealloc is the temp for the iterator. - // MapIterType contains pointers and needs to be zeroed. - if buildcfg.Experiment.SwissMap { - n.Prealloc = o.newTemp(reflectdata.SwissMapIterType(), true) - } else { - n.Prealloc = o.newTemp(reflectdata.OldMapIterType(), true) - } + // SwissMapIterType contains pointers and needs to be zeroed. + n.Prealloc = o.newTemp(reflectdata.SwissMapIterType(), true) } n.Key = o.exprInPlace(n.Key) n.Value = o.exprInPlace(n.Value) diff --git a/src/cmd/compile/internal/walk/range.go b/src/cmd/compile/internal/walk/range.go index 3d3547b84b..de42605bd7 100644 --- a/src/cmd/compile/internal/walk/range.go +++ b/src/cmd/compile/internal/walk/range.go @@ -6,7 +6,6 @@ package walk import ( "go/constant" - "internal/buildcfg" "unicode/utf8" "cmd/compile/internal/base" @@ -247,20 +246,11 @@ func walkRange(nrange *ir.RangeStmt) ir.Node { hit := nrange.Prealloc th := hit.Type() // depends on layout of iterator struct. - // See cmd/compile/internal/reflectdata/reflect.go:MapIterType - var keysym, elemsym *types.Sym - var iterInit, iterNext string - if buildcfg.Experiment.SwissMap { - keysym = th.Field(0).Sym - elemsym = th.Field(1).Sym // ditto - iterInit = "mapIterStart" - iterNext = "mapIterNext" - } else { - keysym = th.Field(0).Sym - elemsym = th.Field(1).Sym // ditto - iterInit = "mapiterinit" - iterNext = "mapiternext" - } + // See cmd/compile/internal/reflectdata/map.go:SwissMapIterType + keysym := th.Field(0).Sym + elemsym := th.Field(1).Sym // ditto + iterInit := "mapIterStart" + iterNext := "mapIterNext" fn := typecheck.LookupRuntime(iterInit, t.Key(), t.Elem(), th) init = append(init, mkcallstmt1(fn, reflectdata.RangeMapRType(base.Pos, nrange), ha, typecheck.NodAddr(hit))) diff --git a/src/cmd/compile/internal/walk/walk.go b/src/cmd/compile/internal/walk/walk.go index 2fa51f1280..7012f5cd43 100644 --- a/src/cmd/compile/internal/walk/walk.go +++ b/src/cmd/compile/internal/walk/walk.go @@ -7,7 +7,6 @@ package walk import ( "fmt" "internal/abi" - "internal/buildcfg" "cmd/compile/internal/base" "cmd/compile/internal/ir" @@ -192,42 +191,7 @@ var mapassign = mkmapnames("mapassign", "ptr") var mapdelete = mkmapnames("mapdelete", "") func mapfast(t *types.Type) int { - if buildcfg.Experiment.SwissMap { - return mapfastSwiss(t) - } - return mapfastOld(t) -} - -func mapfastSwiss(t *types.Type) int { - if t.Elem().Size() > abi.OldMapMaxElemBytes { - return mapslow - } - switch reflectdata.AlgType(t.Key()) { - case types.AMEM32: - if !t.Key().HasPointers() { - return mapfast32 - } - if types.PtrSize == 4 { - return mapfast32ptr - } - base.Fatalf("small pointer %v", t.Key()) - case types.AMEM64: - if !t.Key().HasPointers() { - return mapfast64 - } - if types.PtrSize == 8 { - return mapfast64ptr - } - // Two-word object, at least one of which is a pointer. - // Use the slow path. 
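A few concrete resolutions of the selection above, for illustration (assuming
the elem is at most 128 bytes so the size check at the top passes):

	map[int32]T     -> mapfast32    (AMEM32, key free of pointers)
	map[int64]T     -> mapfast64    (AMEM64, key free of pointers)
	map[*T]V        -> mapfast64ptr on 64-bit, mapfast32ptr on 32-bit
	map[string]T    -> mapfaststr   (ASTRING, continued just below)
	map[[2]uint64]T -> mapslow      (16-byte keys have no fast path)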
- case types.ASTRING: - return mapfaststr - } - return mapslow -} - -func mapfastOld(t *types.Type) int { - if t.Elem().Size() > abi.OldMapMaxElemBytes { + if t.Elem().Size() > abi.SwissMapMaxElemBytes { return mapslow } switch reflectdata.AlgType(t.Key()) { diff --git a/src/cmd/link/internal/ld/deadcode.go b/src/cmd/link/internal/ld/deadcode.go index cdf7deb31b..d042b23827 100644 --- a/src/cmd/link/internal/ld/deadcode.go +++ b/src/cmd/link/internal/ld/deadcode.go @@ -560,13 +560,9 @@ func (d *deadcodePass) decodetypeMethods(ldr *loader.Loader, arch *sys.Arch, sym case abi.Chan: // reflect.chanType off += 2 * arch.PtrSize case abi.Map: - if buildcfg.Experiment.SwissMap { - off += 7*arch.PtrSize + 4 // internal/abi.SwissMapType - if arch.PtrSize == 8 { - off += 4 // padding for final uint32 field (Flags). - } - } else { - off += 4*arch.PtrSize + 8 // internal/abi.OldMapType + off += 7*arch.PtrSize + 4 // internal/abi.SwissMapType + if arch.PtrSize == 8 { + off += 4 // padding for final uint32 field (Flags). } case abi.Interface: // reflect.interfaceType off += 3 * arch.PtrSize diff --git a/src/cmd/link/internal/ld/dwarf.go b/src/cmd/link/internal/ld/dwarf.go index 602d70ddb9..73b3151829 100644 --- a/src/cmd/link/internal/ld/dwarf.go +++ b/src/cmd/link/internal/ld/dwarf.go @@ -872,14 +872,6 @@ func (d *dwctxt) mkinternaltype(ctxt *Link, abbrev int, typename, keyname, valna } func (d *dwctxt) synthesizemaptypes(ctxt *Link, die *dwarf.DWDie) { - if buildcfg.Experiment.SwissMap { - d.synthesizemaptypesSwiss(ctxt, die) - } else { - d.synthesizemaptypesOld(ctxt, die) - } -} - -func (d *dwctxt) synthesizemaptypesSwiss(ctxt *Link, die *dwarf.DWDie) { mapType := walktypedef(d.findprotodie(ctxt, "type:internal/runtime/maps.Map")) tableType := walktypedef(d.findprotodie(ctxt, "type:internal/runtime/maps.table")) groupsReferenceType := walktypedef(d.findprotodie(ctxt, "type:internal/runtime/maps.groupsReference")) @@ -941,102 +933,6 @@ func (d *dwctxt) synthesizemaptypesSwiss(ctxt *Link, die *dwarf.DWDie) { } } -func (d *dwctxt) synthesizemaptypesOld(ctxt *Link, die *dwarf.DWDie) { - hash := walktypedef(d.findprotodie(ctxt, "type:runtime.hmap")) - bucket := walktypedef(d.findprotodie(ctxt, "type:runtime.bmap")) - - if hash == nil { - return - } - - for ; die != nil; die = die.Link { - if die.Abbrev != dwarf.DW_ABRV_MAPTYPE { - continue - } - gotype := loader.Sym(getattr(die, dwarf.DW_AT_type).Data.(dwSym)) - keytype := decodetypeMapKey(d.ldr, d.arch, gotype) - valtype := decodetypeMapValue(d.ldr, d.arch, gotype) - keydata := d.ldr.Data(keytype) - valdata := d.ldr.Data(valtype) - keysize, valsize := decodetypeSize(d.arch, keydata), decodetypeSize(d.arch, valdata) - keytype, valtype = d.walksymtypedef(d.defgotype(keytype)), d.walksymtypedef(d.defgotype(valtype)) - - // compute size info like hashmap.c does. 
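"Size info" here means applying the same inlining rule as the compiler: keys
or values larger than 128 bytes are stored behind a pointer, so the
synthesized DWARF arrays switch to pointer-sized entries in that case, which
is exactly what the indirectKey/indirectVal logic below does.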
- indirectKey, indirectVal := false, false - if keysize > abi.OldMapMaxKeyBytes { - keysize = int64(d.arch.PtrSize) - indirectKey = true - } - if valsize > abi.OldMapMaxElemBytes { - valsize = int64(d.arch.PtrSize) - indirectVal = true - } - - // Construct type to represent an array of BucketSize keys - keyname := d.nameFromDIESym(keytype) - dwhks := d.mkinternaltype(ctxt, dwarf.DW_ABRV_ARRAYTYPE, "[]key", keyname, "", func(dwhk *dwarf.DWDie) { - newattr(dwhk, dwarf.DW_AT_byte_size, dwarf.DW_CLS_CONSTANT, abi.OldMapBucketCount*keysize, 0) - t := keytype - if indirectKey { - t = d.defptrto(keytype) - } - d.newrefattr(dwhk, dwarf.DW_AT_type, t) - fld := d.newdie(dwhk, dwarf.DW_ABRV_ARRAYRANGE, "size") - newattr(fld, dwarf.DW_AT_count, dwarf.DW_CLS_CONSTANT, abi.OldMapBucketCount, 0) - d.newrefattr(fld, dwarf.DW_AT_type, d.uintptrInfoSym) - }) - - // Construct type to represent an array of BucketSize values - valname := d.nameFromDIESym(valtype) - dwhvs := d.mkinternaltype(ctxt, dwarf.DW_ABRV_ARRAYTYPE, "[]val", valname, "", func(dwhv *dwarf.DWDie) { - newattr(dwhv, dwarf.DW_AT_byte_size, dwarf.DW_CLS_CONSTANT, abi.OldMapBucketCount*valsize, 0) - t := valtype - if indirectVal { - t = d.defptrto(valtype) - } - d.newrefattr(dwhv, dwarf.DW_AT_type, t) - fld := d.newdie(dwhv, dwarf.DW_ABRV_ARRAYRANGE, "size") - newattr(fld, dwarf.DW_AT_count, dwarf.DW_CLS_CONSTANT, abi.OldMapBucketCount, 0) - d.newrefattr(fld, dwarf.DW_AT_type, d.uintptrInfoSym) - }) - - // Construct bucket - dwhbs := d.mkinternaltype(ctxt, dwarf.DW_ABRV_STRUCTTYPE, "bucket", keyname, valname, func(dwhb *dwarf.DWDie) { - // Copy over all fields except the field "data" from the generic - // bucket. "data" will be replaced with keys/values below. - d.copychildrenexcept(ctxt, dwhb, bucket, findchild(bucket, "data")) - - fld := d.newdie(dwhb, dwarf.DW_ABRV_STRUCTFIELD, "keys") - d.newrefattr(fld, dwarf.DW_AT_type, dwhks) - newmemberoffsetattr(fld, abi.OldMapBucketCount) - fld = d.newdie(dwhb, dwarf.DW_ABRV_STRUCTFIELD, "values") - d.newrefattr(fld, dwarf.DW_AT_type, dwhvs) - newmemberoffsetattr(fld, abi.OldMapBucketCount+abi.OldMapBucketCount*int32(keysize)) - fld = d.newdie(dwhb, dwarf.DW_ABRV_STRUCTFIELD, "overflow") - d.newrefattr(fld, dwarf.DW_AT_type, d.defptrto(d.dtolsym(dwhb.Sym))) - newmemberoffsetattr(fld, abi.OldMapBucketCount+abi.OldMapBucketCount*(int32(keysize)+int32(valsize))) - if d.arch.RegSize > d.arch.PtrSize { - fld = d.newdie(dwhb, dwarf.DW_ABRV_STRUCTFIELD, "pad") - d.newrefattr(fld, dwarf.DW_AT_type, d.uintptrInfoSym) - newmemberoffsetattr(fld, abi.OldMapBucketCount+abi.OldMapBucketCount*(int32(keysize)+int32(valsize))+int32(d.arch.PtrSize)) - } - - newattr(dwhb, dwarf.DW_AT_byte_size, dwarf.DW_CLS_CONSTANT, abi.OldMapBucketCount+abi.OldMapBucketCount*keysize+abi.OldMapBucketCount*valsize+int64(d.arch.RegSize), 0) - }) - - // Construct hash - dwhs := d.mkinternaltype(ctxt, dwarf.DW_ABRV_STRUCTTYPE, "hash", keyname, valname, func(dwh *dwarf.DWDie) { - d.copychildren(ctxt, dwh, hash) - d.substitutetype(dwh, "buckets", d.defptrto(dwhbs)) - d.substitutetype(dwh, "oldbuckets", d.defptrto(dwhbs)) - newattr(dwh, dwarf.DW_AT_byte_size, dwarf.DW_CLS_CONSTANT, getattr(hash, dwarf.DW_AT_byte_size).Value, nil) - }) - - // make map type a pointer to hash - d.newrefattr(die, dwarf.DW_AT_type, d.defptrto(dwhs)) - } -} - func (d *dwctxt) synthesizechantypes(ctxt *Link, die *dwarf.DWDie) { sudog := walktypedef(d.findprotodie(ctxt, "type:runtime.sudog")) waitq := walktypedef(d.findprotodie(ctxt, "type:runtime.waitq")) @@ -2010,19 
+1906,14 @@ func dwarfGenerateDebugInfo(ctxt *Link) { // Prototypes needed for type synthesis. prototypedies = map[string]*dwarf.DWDie{ - "type:runtime.stringStructDWARF": nil, - "type:runtime.slice": nil, - "type:runtime.sudog": nil, - "type:runtime.waitq": nil, - "type:runtime.hchan": nil, - } - if buildcfg.Experiment.SwissMap { - prototypedies["type:internal/runtime/maps.Map"] = nil - prototypedies["type:internal/runtime/maps.table"] = nil - prototypedies["type:internal/runtime/maps.groupsReference"] = nil - } else { - prototypedies["type:runtime.hmap"] = nil - prototypedies["type:runtime.bmap"] = nil + "type:runtime.stringStructDWARF": nil, + "type:runtime.slice": nil, + "type:runtime.sudog": nil, + "type:runtime.waitq": nil, + "type:runtime.hchan": nil, + "type:internal/runtime/maps.Map": nil, + "type:internal/runtime/maps.table": nil, + "type:internal/runtime/maps.groupsReference": nil, } // Needed by the prettyprinter code for interface inspection. @@ -2034,16 +1925,12 @@ func dwarfGenerateDebugInfo(ctxt *Link) { "type:internal/abi.PtrType", "type:internal/abi.SliceType", "type:internal/abi.StructType", + "type:internal/abi.SwissMapType", "type:internal/abi.InterfaceType", "type:internal/abi.ITab", "type:internal/abi.Imethod"} { d.defgotype(d.lookupOrDiag(typ)) } - if buildcfg.Experiment.SwissMap { - d.defgotype(d.lookupOrDiag("type:internal/abi.SwissMapType")) - } else { - d.defgotype(d.lookupOrDiag("type:internal/abi.OldMapType")) - } // fake root DIE for compile unit DIEs var dwroot dwarf.DWDie diff --git a/src/cmd/link/internal/ld/dwarf_test.go b/src/cmd/link/internal/ld/dwarf_test.go index ab086c57f4..ded7794dc3 100644 --- a/src/cmd/link/internal/ld/dwarf_test.go +++ b/src/cmd/link/internal/ld/dwarf_test.go @@ -63,6 +63,7 @@ func TestRuntimeTypesPresent(t *testing.T) { "internal/abi.PtrType": true, "internal/abi.SliceType": true, "internal/abi.StructType": true, + "internal/abi.SwissMapType": true, "internal/abi.InterfaceType": true, "internal/abi.ITab": true, } @@ -71,16 +72,6 @@ func TestRuntimeTypesPresent(t *testing.T) { if len(found) != len(want) { t.Errorf("found %v, want %v", found, want) } - - // Must have one of OldMapType or SwissMapType. - want = map[string]bool{ - "internal/abi.OldMapType": true, - "internal/abi.SwissMapType": true, - } - found = findTypes(t, dwarf, want) - if len(found) != 1 { - t.Errorf("map type want one of %v found %v", want, found) - } } func findTypes(t *testing.T, dw *dwarf.Data, want map[string]bool) (found map[string]bool) { diff --git a/src/hash/maphash/maphash_runtime.go b/src/hash/maphash/maphash_runtime.go index 91e7d49e2c..14e2a0c75f 100644 --- a/src/hash/maphash/maphash_runtime.go +++ b/src/hash/maphash/maphash_runtime.go @@ -9,7 +9,6 @@ package maphash import ( "internal/abi" "internal/goarch" - "internal/goexperiment" "unsafe" ) @@ -51,12 +50,7 @@ func comparableHash[T comparable](v T, seed Seed) uint64 { s := seed.s var m map[T]struct{} mTyp := abi.TypeOf(m) - var hasher func(unsafe.Pointer, uintptr) uintptr - if goexperiment.SwissMap { - hasher = (*abi.SwissMapType)(unsafe.Pointer(mTyp)).Hasher - } else { - hasher = (*abi.OldMapType)(unsafe.Pointer(mTyp)).Hasher - } + hasher := (*abi.SwissMapType)(unsafe.Pointer(mTyp)).Hasher if goarch.PtrSize == 8 { return uint64(hasher(abi.NoEscape(unsafe.Pointer(&v)), uintptr(s))) } diff --git a/src/internal/abi/map.go b/src/internal/abi/map.go new file mode 100644 index 0000000000..6c855667e3 --- /dev/null +++ b/src/internal/abi/map.go @@ -0,0 +1,64 @@ +// Copyright 2023 The Go Authors. 
All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package abi + +import ( + "unsafe" +) + +// Map constants common to several packages +// runtime/runtime-gdb.py:MapTypePrinter contains its own copy +const ( + // Number of bits in the group.slot count. + SwissMapGroupSlotsBits = 3 + + // Number of slots in a group. + SwissMapGroupSlots = 1 << SwissMapGroupSlotsBits // 8 + + // Maximum key or elem size to keep inline (instead of mallocing per element). + // Must fit in a uint8. + SwissMapMaxKeyBytes = 128 + SwissMapMaxElemBytes = 128 + + ctrlEmpty = 0b10000000 + bitsetLSB = 0x0101010101010101 + + // Value of control word with all empty slots. + SwissMapCtrlEmpty = bitsetLSB * uint64(ctrlEmpty) +) + +type SwissMapType struct { + Type + Key *Type + Elem *Type + Group *Type // internal type representing a slot group + // function for hashing keys (ptr to key, seed) -> hash + Hasher func(unsafe.Pointer, uintptr) uintptr + GroupSize uintptr // == Group.Size_ + SlotSize uintptr // size of key/elem slot + ElemOff uintptr // offset of elem in key/elem slot + Flags uint32 +} + +// Flag values +const ( + SwissMapNeedKeyUpdate = 1 << iota + SwissMapHashMightPanic + SwissMapIndirectKey + SwissMapIndirectElem +) + +func (mt *SwissMapType) NeedKeyUpdate() bool { // true if we need to update key on an overwrite + return mt.Flags&SwissMapNeedKeyUpdate != 0 +} +func (mt *SwissMapType) HashMightPanic() bool { // true if hash function might panic + return mt.Flags&SwissMapHashMightPanic != 0 +} +func (mt *SwissMapType) IndirectKey() bool { // store ptr to key instead of key itself + return mt.Flags&SwissMapIndirectKey != 0 +} +func (mt *SwissMapType) IndirectElem() bool { // store ptr to elem instead of elem itself + return mt.Flags&SwissMapIndirectElem != 0 +} diff --git a/src/internal/abi/map_noswiss.go b/src/internal/abi/map_noswiss.go deleted file mode 100644 index ff8609efcf..0000000000 --- a/src/internal/abi/map_noswiss.go +++ /dev/null @@ -1,54 +0,0 @@ -// Copyright 2023 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package abi - -import ( - "unsafe" -) - -// Map constants common to several packages -// runtime/runtime-gdb.py:MapTypePrinter contains its own copy -const ( - // Maximum number of key/elem pairs a bucket can hold. - OldMapBucketCountBits = 3 // log2 of number of elements in a bucket. - OldMapBucketCount = 1 << OldMapBucketCountBits - - // Maximum key or elem size to keep inline (instead of mallocing per element). - // Must fit in a uint8. - // Note: fast map functions cannot handle big elems (bigger than MapMaxElemBytes). - OldMapMaxKeyBytes = 128 - OldMapMaxElemBytes = 128 // Must fit in a uint8. -) - -type OldMapType struct { - Type - Key *Type - Elem *Type - Bucket *Type // internal type representing a hash bucket - // function for hashing keys (ptr to key, seed) -> hash - Hasher func(unsafe.Pointer, uintptr) uintptr - KeySize uint8 // size of key slot - ValueSize uint8 // size of elem slot - BucketSize uint16 // size of bucket - Flags uint32 -} - -// Note: flag values must match those used in the TMAP case -// in ../cmd/compile/internal/reflectdata/reflect.go:writeType. 
-func (mt *OldMapType) IndirectKey() bool { // store ptr to key instead of key itself - return mt.Flags&1 != 0 -} -func (mt *OldMapType) IndirectElem() bool { // store ptr to elem instead of elem itself - return mt.Flags&2 != 0 -} -func (mt *OldMapType) ReflexiveKey() bool { // true if k==k for all keys - return mt.Flags&4 != 0 -} -func (mt *OldMapType) NeedKeyUpdate() bool { // true if we need to update key on an overwrite - return mt.Flags&8 != 0 -} -func (mt *OldMapType) HashMightPanic() bool { // true if hash function might panic - return mt.Flags&16 != 0 -} diff --git a/src/internal/abi/map_select_noswiss.go b/src/internal/abi/map_select_noswiss.go deleted file mode 100644 index ab2b69de7e..0000000000 --- a/src/internal/abi/map_select_noswiss.go +++ /dev/null @@ -1,10 +0,0 @@ -// Copyright 2023 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build !goexperiment.swissmap - -package abi - -// See comment in map_select_swiss.go. -type mapType = OldMapType diff --git a/src/internal/abi/map_select_swiss.go b/src/internal/abi/map_select_swiss.go deleted file mode 100644 index 88a0bb2ebb..0000000000 --- a/src/internal/abi/map_select_swiss.go +++ /dev/null @@ -1,22 +0,0 @@ -// Copyright 2024 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build goexperiment.swissmap - -package abi - -// Select the map type that this binary is built using. This is for common -// lookup methods like Type.Key to know which type to use. -// -// Note that mapType *must not be used by any functions called in the -// compiler to build a target program* because the compiler must use the map -// type determined by run-time GOEXPERIMENT, not the build tags used to build -// the compiler. -// -// TODO(prattmic): This package is rather confusing because it has many -// functions that can't be used by the compiler (e.g., Type.Uncommon depends on -// the layout of type + uncommon objects in the binary. It would be incorrect -// for an ad-hoc local Type object). It may be best to move code that isn't -// usable by the compiler out of the package. -type mapType = SwissMapType diff --git a/src/internal/abi/map_swiss.go b/src/internal/abi/map_swiss.go deleted file mode 100644 index 6c855667e3..0000000000 --- a/src/internal/abi/map_swiss.go +++ /dev/null @@ -1,64 +0,0 @@ -// Copyright 2023 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package abi - -import ( - "unsafe" -) - -// Map constants common to several packages -// runtime/runtime-gdb.py:MapTypePrinter contains its own copy -const ( - // Number of bits in the group.slot count. - SwissMapGroupSlotsBits = 3 - - // Number of slots in a group. - SwissMapGroupSlots = 1 << SwissMapGroupSlotsBits // 8 - - // Maximum key or elem size to keep inline (instead of mallocing per element). - // Must fit in a uint8. - SwissMapMaxKeyBytes = 128 - SwissMapMaxElemBytes = 128 - - ctrlEmpty = 0b10000000 - bitsetLSB = 0x0101010101010101 - - // Value of control word with all empty slots. 
- SwissMapCtrlEmpty = bitsetLSB * uint64(ctrlEmpty) -) - -type SwissMapType struct { - Type - Key *Type - Elem *Type - Group *Type // internal type representing a slot group - // function for hashing keys (ptr to key, seed) -> hash - Hasher func(unsafe.Pointer, uintptr) uintptr - GroupSize uintptr // == Group.Size_ - SlotSize uintptr // size of key/elem slot - ElemOff uintptr // offset of elem in key/elem slot - Flags uint32 -} - -// Flag values -const ( - SwissMapNeedKeyUpdate = 1 << iota - SwissMapHashMightPanic - SwissMapIndirectKey - SwissMapIndirectElem -) - -func (mt *SwissMapType) NeedKeyUpdate() bool { // true if we need to update key on an overwrite - return mt.Flags&SwissMapNeedKeyUpdate != 0 -} -func (mt *SwissMapType) HashMightPanic() bool { // true if hash function might panic - return mt.Flags&SwissMapHashMightPanic != 0 -} -func (mt *SwissMapType) IndirectKey() bool { // store ptr to key instead of key itself - return mt.Flags&SwissMapIndirectKey != 0 -} -func (mt *SwissMapType) IndirectElem() bool { // store ptr to elem instead of elem itself - return mt.Flags&SwissMapIndirectElem != 0 -} diff --git a/src/internal/abi/type.go b/src/internal/abi/type.go index 29e17cf6a2..fd6bef4899 100644 --- a/src/internal/abi/type.go +++ b/src/internal/abi/type.go @@ -355,7 +355,7 @@ func (t *Type) Uncommon() *UncommonType { return &(*u)(unsafe.Pointer(t)).u case Map: type u struct { - mapType + SwissMapType u UncommonType } return &(*u)(unsafe.Pointer(t)).u @@ -384,7 +384,7 @@ func (t *Type) Elem() *Type { tt := (*ChanType)(unsafe.Pointer(t)) return tt.Elem case Map: - tt := (*mapType)(unsafe.Pointer(t)) + tt := (*SwissMapType)(unsafe.Pointer(t)) return tt.Elem case Pointer: tt := (*PtrType)(unsafe.Pointer(t)) @@ -404,12 +404,12 @@ func (t *Type) StructType() *StructType { return (*StructType)(unsafe.Pointer(t)) } -// MapType returns t cast to a *OldMapType or *SwissMapType, or nil if its tag does not match. -func (t *Type) MapType() *mapType { +// MapType returns t cast to a *SwissMapType, or nil if its tag does not match. +func (t *Type) MapType() *SwissMapType { if t.Kind() != Map { return nil } - return (*mapType)(unsafe.Pointer(t)) + return (*SwissMapType)(unsafe.Pointer(t)) } // ArrayType returns t cast to a *ArrayType, or nil if its tag does not match. @@ -471,7 +471,7 @@ func (t *InterfaceType) NumMethod() int { return len(t.Methods) } func (t *Type) Key() *Type { if t.Kind() == Map { - return (*mapType)(unsafe.Pointer(t)).Key + return (*SwissMapType)(unsafe.Pointer(t)).Key } return nil } diff --git a/src/internal/buildcfg/exp.go b/src/internal/buildcfg/exp.go index 3f3db8f25f..df84e9fdf4 100644 --- a/src/internal/buildcfg/exp.go +++ b/src/internal/buildcfg/exp.go @@ -82,7 +82,6 @@ func ParseGOEXPERIMENT(goos, goarch, goexp string) (*ExperimentFlags, error) { RegabiWrappers: regabiSupported, RegabiArgs: regabiSupported, AliasTypeParams: true, - SwissMap: true, Dwarf5: dwarf5Supported, } diff --git a/src/internal/goexperiment/exp_swissmap_off.go b/src/internal/goexperiment/exp_swissmap_off.go deleted file mode 100644 index 2af40aa60b..0000000000 --- a/src/internal/goexperiment/exp_swissmap_off.go +++ /dev/null @@ -1,8 +0,0 @@ -// Code generated by mkconsts.go. DO NOT EDIT. 
- -//go:build !goexperiment.swissmap - -package goexperiment - -const SwissMap = false -const SwissMapInt = 0 diff --git a/src/internal/goexperiment/exp_swissmap_on.go b/src/internal/goexperiment/exp_swissmap_on.go deleted file mode 100644 index 73be49b606..0000000000 --- a/src/internal/goexperiment/exp_swissmap_on.go +++ /dev/null @@ -1,8 +0,0 @@ -// Code generated by mkconsts.go. DO NOT EDIT. - -//go:build goexperiment.swissmap - -package goexperiment - -const SwissMap = true -const SwissMapInt = 1 diff --git a/src/internal/goexperiment/flags.go b/src/internal/goexperiment/flags.go index ca99bfbb5c..dd7a4f446c 100644 --- a/src/internal/goexperiment/flags.go +++ b/src/internal/goexperiment/flags.go @@ -105,9 +105,6 @@ type Flags struct { // This flag will be removed with Go 1.25. AliasTypeParams bool - // SwissMap enables the SwissTable-based map implementation. - SwissMap bool - // Synctest enables the testing/synctest package. Synctest bool diff --git a/src/internal/runtime/maps/export_noswiss_test.go b/src/internal/runtime/maps/export_noswiss_test.go deleted file mode 100644 index 333fc6ce90..0000000000 --- a/src/internal/runtime/maps/export_noswiss_test.go +++ /dev/null @@ -1,51 +0,0 @@ -// Copyright 2024 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build !goexperiment.swissmap - -// This file allows non-GOEXPERIMENT=swissmap builds (i.e., old map builds) to -// construct a swissmap table for running the tests in this package. - -package maps - -import ( - "internal/abi" - "unsafe" -) - -type instantiatedGroup[K comparable, V any] struct { - ctrls ctrlGroup - slots [abi.SwissMapGroupSlots]instantiatedSlot[K, V] -} - -type instantiatedSlot[K comparable, V any] struct { - key K - elem V -} - -func newTestMapType[K comparable, V any]() *abi.SwissMapType { - var m map[K]V - mTyp := abi.TypeOf(m) - omt := (*abi.OldMapType)(unsafe.Pointer(mTyp)) - - var grp instantiatedGroup[K, V] - var slot instantiatedSlot[K, V] - - mt := &abi.SwissMapType{ - Key: omt.Key, - Elem: omt.Elem, - Group: abi.TypeOf(grp), - Hasher: omt.Hasher, - SlotSize: unsafe.Sizeof(slot), - GroupSize: unsafe.Sizeof(grp), - ElemOff: unsafe.Offsetof(slot.elem), - } - if omt.NeedKeyUpdate() { - mt.Flags |= abi.SwissMapNeedKeyUpdate - } - if omt.HashMightPanic() { - mt.Flags |= abi.SwissMapHashMightPanic - } - return mt -} diff --git a/src/internal/runtime/maps/export_swiss_test.go b/src/internal/runtime/maps/export_swiss_test.go deleted file mode 100644 index 3c6faf5c48..0000000000 --- a/src/internal/runtime/maps/export_swiss_test.go +++ /dev/null @@ -1,19 +0,0 @@ -// Copyright 2024 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build goexperiment.swissmap - -package maps - -import ( - "internal/abi" - "unsafe" -) - -func newTestMapType[K comparable, V any]() *abi.SwissMapType { - var m map[K]V - mTyp := abi.TypeOf(m) - mt := (*abi.SwissMapType)(unsafe.Pointer(mTyp)) - return mt -} diff --git a/src/internal/runtime/maps/export_test.go b/src/internal/runtime/maps/export_test.go index 2c7b05ea2d..372ab89627 100644 --- a/src/internal/runtime/maps/export_test.go +++ b/src/internal/runtime/maps/export_test.go @@ -22,6 +22,13 @@ const MaxAvgGroupLoad = maxAvgGroupLoad // we can't properly test hint alloc overflows with this. 
const maxAllocTest = 1 << 30 +func newTestMapType[K comparable, V any]() *abi.SwissMapType { + var m map[K]V + mTyp := abi.TypeOf(m) + mt := (*abi.SwissMapType)(unsafe.Pointer(mTyp)) + return mt +} + func NewTestMap[K comparable, V any](hint uintptr) (*Map, *abi.SwissMapType) { mt := newTestMapType[K, V]() return NewMap(mt, hint, nil, maxAllocTest), mt diff --git a/src/internal/runtime/maps/map.go b/src/internal/runtime/maps/map.go index 017942cfad..9781c46463 100644 --- a/src/internal/runtime/maps/map.go +++ b/src/internal/runtime/maps/map.go @@ -191,7 +191,7 @@ func h2(h uintptr) uintptr { return h & 0x7f } -// Note: changes here must be reflected in cmd/compile/internal/reflectdata/map_swiss.go:SwissMapType. +// Note: changes here must be reflected in cmd/compile/internal/reflectdata/map.go:SwissMapType. type Map struct { // The number of filled slots (i.e. the number of elements in all // tables). Excludes deleted slots. @@ -814,13 +814,6 @@ func (m *Map) Clone(typ *abi.SwissMapType) *Map { return m } -func OldMapKeyError(t *abi.OldMapType, p unsafe.Pointer) error { - if !t.HashMightPanic() { - return nil - } - return mapKeyError2(t.Key, p) -} - func mapKeyError(t *abi.SwissMapType, p unsafe.Pointer) error { if !t.HashMightPanic() { return nil diff --git a/src/internal/runtime/maps/map_swiss_test.go b/src/internal/runtime/maps/map_swiss_test.go deleted file mode 100644 index eef1c5b191..0000000000 --- a/src/internal/runtime/maps/map_swiss_test.go +++ /dev/null @@ -1,267 +0,0 @@ -// Copyright 2024 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Tests of map internals that need to use the builtin map type, and thus must -// be built with GOEXPERIMENT=swissmap. - -//go:build goexperiment.swissmap - -package maps_test - -import ( - "fmt" - "internal/abi" - "internal/runtime/maps" - "testing" - "unsafe" -) - -var alwaysFalse bool -var escapeSink any - -func escape[T any](x T) T { - if alwaysFalse { - escapeSink = x - } - return x -} - -const ( - belowMax = abi.SwissMapGroupSlots * 3 / 2 // 1.5 * group max = 2 groups @ 75% - atMax = (2 * abi.SwissMapGroupSlots * maps.MaxAvgGroupLoad) / abi.SwissMapGroupSlots // 2 groups at 7/8 full. -) - -func TestTableGroupCount(t *testing.T) { - // Test that maps of different sizes have the right number of - // tables/groups. 
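mapKeyError above (now the only variant, with OldMapKeyError gone) only does real work when the key type's hash can panic, i.e. interface keys that may hold uncomparable values. Its effect is observable from ordinary code even on an empty map, because the lookup paths check it before touching any table; a runnable sketch:

package main

import "fmt"

func main() {
	m := map[any]int{}
	defer func() {
		// Prints "recovered: runtime error: hash of unhashable type []int".
		fmt.Println("recovered:", recover())
	}()
	// The map is empty, so no hashing is needed to know the key is
	// absent; the mapKeyError check still raises the same panic a
	// non-empty map would (see issue 23734 referenced in the patch).
	_ = m[[]int{1, 2}]
}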
- - type mapCount struct { - tables int - groups uint64 - } - - type mapCase struct { - initialLit mapCount - initialHint mapCount - after mapCount - } - - var testCases = []struct { - n int // n is the number of map elements - escape mapCase // expected values for escaping map - }{ - { - n: -(1 << 30), - escape: mapCase{ - initialLit: mapCount{0, 0}, - initialHint: mapCount{0, 0}, - after: mapCount{0, 0}, - }, - }, - { - n: -1, - escape: mapCase{ - initialLit: mapCount{0, 0}, - initialHint: mapCount{0, 0}, - after: mapCount{0, 0}, - }, - }, - { - n: 0, - escape: mapCase{ - initialLit: mapCount{0, 0}, - initialHint: mapCount{0, 0}, - after: mapCount{0, 0}, - }, - }, - { - n: 1, - escape: mapCase{ - initialLit: mapCount{0, 0}, - initialHint: mapCount{0, 0}, - after: mapCount{0, 1}, - }, - }, - { - n: abi.SwissMapGroupSlots, - escape: mapCase{ - initialLit: mapCount{0, 0}, - initialHint: mapCount{0, 0}, - after: mapCount{0, 1}, - }, - }, - { - n: abi.SwissMapGroupSlots + 1, - escape: mapCase{ - initialLit: mapCount{0, 0}, - initialHint: mapCount{1, 2}, - after: mapCount{1, 2}, - }, - }, - { - n: belowMax, // 1.5 group max = 2 groups @ 75% - escape: mapCase{ - initialLit: mapCount{0, 0}, - initialHint: mapCount{1, 2}, - after: mapCount{1, 2}, - }, - }, - { - n: atMax, // 2 groups at max - escape: mapCase{ - initialLit: mapCount{0, 0}, - initialHint: mapCount{1, 2}, - after: mapCount{1, 2}, - }, - }, - { - n: atMax + 1, // 2 groups at max + 1 -> grow to 4 groups - escape: mapCase{ - initialLit: mapCount{0, 0}, - initialHint: mapCount{1, 4}, - after: mapCount{1, 4}, - }, - }, - { - n: 2 * belowMax, // 3 * group max = 4 groups @75% - escape: mapCase{ - initialLit: mapCount{0, 0}, - initialHint: mapCount{1, 4}, - after: mapCount{1, 4}, - }, - }, - { - n: 2*atMax + 1, // 4 groups at max + 1 -> grow to 8 groups - escape: mapCase{ - initialLit: mapCount{0, 0}, - initialHint: mapCount{1, 8}, - after: mapCount{1, 8}, - }, - }, - } - - testMap := func(t *testing.T, m map[int]int, n int, initial, after mapCount) { - mm := *(**maps.Map)(unsafe.Pointer(&m)) - - gotTab := mm.TableCount() - if gotTab != initial.tables { - t.Errorf("initial TableCount got %d want %d", gotTab, initial.tables) - } - - gotGroup := mm.GroupCount() - if gotGroup != initial.groups { - t.Errorf("initial GroupCount got %d want %d", gotGroup, initial.groups) - } - - for i := 0; i < n; i++ { - m[i] = i - } - - gotTab = mm.TableCount() - if gotTab != after.tables { - t.Errorf("after TableCount got %d want %d", gotTab, after.tables) - } - - gotGroup = mm.GroupCount() - if gotGroup != after.groups { - t.Errorf("after GroupCount got %d want %d", gotGroup, after.groups) - } - } - - t.Run("mapliteral", func(t *testing.T) { - for _, tc := range testCases { - t.Run(fmt.Sprintf("n=%d", tc.n), func(t *testing.T) { - t.Run("escape", func(t *testing.T) { - m := escape(map[int]int{}) - testMap(t, m, tc.n, tc.escape.initialLit, tc.escape.after) - }) - }) - } - }) - t.Run("nohint", func(t *testing.T) { - for _, tc := range testCases { - t.Run(fmt.Sprintf("n=%d", tc.n), func(t *testing.T) { - t.Run("escape", func(t *testing.T) { - m := escape(make(map[int]int)) - testMap(t, m, tc.n, tc.escape.initialLit, tc.escape.after) - }) - }) - } - }) - t.Run("makemap", func(t *testing.T) { - for _, tc := range testCases { - t.Run(fmt.Sprintf("n=%d", tc.n), func(t *testing.T) { - t.Run("escape", func(t *testing.T) { - m := escape(make(map[int]int, tc.n)) - testMap(t, m, tc.n, tc.escape.initialHint, tc.escape.after) - }) - }) - } - }) - t.Run("makemap64", func(t 
*testing.T) { - for _, tc := range testCases { - t.Run(fmt.Sprintf("n=%d", tc.n), func(t *testing.T) { - t.Run("escape", func(t *testing.T) { - m := escape(make(map[int]int, int64(tc.n))) - testMap(t, m, tc.n, tc.escape.initialHint, tc.escape.after) - }) - }) - } - }) -} - -func TestTombstoneGrow(t *testing.T) { - tableSizes := []int{16, 32, 64, 128, 256} - for _, tableSize := range tableSizes { - for _, load := range []string{"low", "mid", "high"} { - capacity := tableSize * 7 / 8 - var initialElems int - switch load { - case "low": - initialElems = capacity / 8 - case "mid": - initialElems = capacity / 2 - case "high": - initialElems = capacity - } - t.Run(fmt.Sprintf("tableSize=%d/elems=%d/load=%0.3f", tableSize, initialElems, float64(initialElems)/float64(tableSize)), func(t *testing.T) { - allocs := testing.AllocsPerRun(1, func() { - // Fill the map with elements. - m := make(map[int]int, capacity) - for i := range initialElems { - m[i] = i - } - - // This is the heart of our test. - // Loop over the map repeatedly, deleting a key then adding a not-yet-seen key - // while keeping the map at a ~constant number of elements (+/-1). - nextKey := initialElems - for range 100000 { - for k := range m { - delete(m, k) - break - } - m[nextKey] = nextKey - nextKey++ - if len(m) != initialElems { - t.Fatal("len(m) should remain constant") - } - } - }) - - // The make has 4 allocs (map, directory, table, groups). - // Each growth has 2 allocs (table, groups). - // We allow two growths if we start full, 1 otherwise. - // Fail (somewhat arbitrarily) if there are more than that. - allowed := float64(4 + 1*2) - if initialElems == capacity { - allowed += 2 - } - if allocs > allowed { - t.Fatalf("got %v allocations, allowed %v", allocs, allowed) - } - }) - } - } -} diff --git a/src/internal/runtime/maps/map_test.go b/src/internal/runtime/maps/map_test.go index 160450ebb2..a5fab54832 100644 --- a/src/internal/runtime/maps/map_test.go +++ b/src/internal/runtime/maps/map_test.go @@ -699,3 +699,252 @@ func TestMapDeleteClear(t *testing.T) { t.Errorf("Delete(%d) failed to clear element. got %d want 0", key, gotElem) } } + +var alwaysFalse bool +var escapeSink any + +func escape[T any](x T) T { + if alwaysFalse { + escapeSink = x + } + return x +} + +const ( + belowMax = abi.SwissMapGroupSlots * 3 / 2 // 1.5 * group max = 2 groups @ 75% + atMax = (2 * abi.SwissMapGroupSlots * maps.MaxAvgGroupLoad) / abi.SwissMapGroupSlots // 2 groups at 7/8 full. +) + +func TestTableGroupCount(t *testing.T) { + // Test that maps of different sizes have the right number of + // tables/groups. 
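+	// (testMap below recovers the runtime's map header from the map
+	// variable with a double pointer cast, *(**maps.Map)(unsafe.Pointer(&m)),
+	// and reads the table and group counts directly off it.)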
+ + type mapCount struct { + tables int + groups uint64 + } + + type mapCase struct { + initialLit mapCount + initialHint mapCount + after mapCount + } + + var testCases = []struct { + n int // n is the number of map elements + escape mapCase // expected values for escaping map + }{ + { + n: -(1 << 30), + escape: mapCase{ + initialLit: mapCount{0, 0}, + initialHint: mapCount{0, 0}, + after: mapCount{0, 0}, + }, + }, + { + n: -1, + escape: mapCase{ + initialLit: mapCount{0, 0}, + initialHint: mapCount{0, 0}, + after: mapCount{0, 0}, + }, + }, + { + n: 0, + escape: mapCase{ + initialLit: mapCount{0, 0}, + initialHint: mapCount{0, 0}, + after: mapCount{0, 0}, + }, + }, + { + n: 1, + escape: mapCase{ + initialLit: mapCount{0, 0}, + initialHint: mapCount{0, 0}, + after: mapCount{0, 1}, + }, + }, + { + n: abi.SwissMapGroupSlots, + escape: mapCase{ + initialLit: mapCount{0, 0}, + initialHint: mapCount{0, 0}, + after: mapCount{0, 1}, + }, + }, + { + n: abi.SwissMapGroupSlots + 1, + escape: mapCase{ + initialLit: mapCount{0, 0}, + initialHint: mapCount{1, 2}, + after: mapCount{1, 2}, + }, + }, + { + n: belowMax, // 1.5 group max = 2 groups @ 75% + escape: mapCase{ + initialLit: mapCount{0, 0}, + initialHint: mapCount{1, 2}, + after: mapCount{1, 2}, + }, + }, + { + n: atMax, // 2 groups at max + escape: mapCase{ + initialLit: mapCount{0, 0}, + initialHint: mapCount{1, 2}, + after: mapCount{1, 2}, + }, + }, + { + n: atMax + 1, // 2 groups at max + 1 -> grow to 4 groups + escape: mapCase{ + initialLit: mapCount{0, 0}, + initialHint: mapCount{1, 4}, + after: mapCount{1, 4}, + }, + }, + { + n: 2 * belowMax, // 3 * group max = 4 groups @75% + escape: mapCase{ + initialLit: mapCount{0, 0}, + initialHint: mapCount{1, 4}, + after: mapCount{1, 4}, + }, + }, + { + n: 2*atMax + 1, // 4 groups at max + 1 -> grow to 8 groups + escape: mapCase{ + initialLit: mapCount{0, 0}, + initialHint: mapCount{1, 8}, + after: mapCount{1, 8}, + }, + }, + } + + testMap := func(t *testing.T, m map[int]int, n int, initial, after mapCount) { + mm := *(**maps.Map)(unsafe.Pointer(&m)) + + gotTab := mm.TableCount() + if gotTab != initial.tables { + t.Errorf("initial TableCount got %d want %d", gotTab, initial.tables) + } + + gotGroup := mm.GroupCount() + if gotGroup != initial.groups { + t.Errorf("initial GroupCount got %d want %d", gotGroup, initial.groups) + } + + for i := 0; i < n; i++ { + m[i] = i + } + + gotTab = mm.TableCount() + if gotTab != after.tables { + t.Errorf("after TableCount got %d want %d", gotTab, after.tables) + } + + gotGroup = mm.GroupCount() + if gotGroup != after.groups { + t.Errorf("after GroupCount got %d want %d", gotGroup, after.groups) + } + } + + t.Run("mapliteral", func(t *testing.T) { + for _, tc := range testCases { + t.Run(fmt.Sprintf("n=%d", tc.n), func(t *testing.T) { + t.Run("escape", func(t *testing.T) { + m := escape(map[int]int{}) + testMap(t, m, tc.n, tc.escape.initialLit, tc.escape.after) + }) + }) + } + }) + t.Run("nohint", func(t *testing.T) { + for _, tc := range testCases { + t.Run(fmt.Sprintf("n=%d", tc.n), func(t *testing.T) { + t.Run("escape", func(t *testing.T) { + m := escape(make(map[int]int)) + testMap(t, m, tc.n, tc.escape.initialLit, tc.escape.after) + }) + }) + } + }) + t.Run("makemap", func(t *testing.T) { + for _, tc := range testCases { + t.Run(fmt.Sprintf("n=%d", tc.n), func(t *testing.T) { + t.Run("escape", func(t *testing.T) { + m := escape(make(map[int]int, tc.n)) + testMap(t, m, tc.n, tc.escape.initialHint, tc.escape.after) + }) + }) + } + }) + t.Run("makemap64", func(t 
*testing.T) {
+		for _, tc := range testCases {
+			t.Run(fmt.Sprintf("n=%d", tc.n), func(t *testing.T) {
+				t.Run("escape", func(t *testing.T) {
+					m := escape(make(map[int]int, int64(tc.n)))
+					testMap(t, m, tc.n, tc.escape.initialHint, tc.escape.after)
+				})
+			})
+		}
+	})
+}
+
+func TestTombstoneGrow(t *testing.T) {
+	tableSizes := []int{16, 32, 64, 128, 256}
+	for _, tableSize := range tableSizes {
+		for _, load := range []string{"low", "mid", "high"} {
+			capacity := tableSize * 7 / 8
+			var initialElems int
+			switch load {
+			case "low":
+				initialElems = capacity / 8
+			case "mid":
+				initialElems = capacity / 2
+			case "high":
+				initialElems = capacity
+			}
+			t.Run(fmt.Sprintf("tableSize=%d/elems=%d/load=%0.3f", tableSize, initialElems, float64(initialElems)/float64(tableSize)), func(t *testing.T) {
+				allocs := testing.AllocsPerRun(1, func() {
+					// Fill the map with elements.
+					m := make(map[int]int, capacity)
+					for i := range initialElems {
+						m[i] = i
+					}
+
+					// This is the heart of our test.
+					// Loop over the map repeatedly, deleting a key then adding a not-yet-seen key
+					// while keeping the map at a ~constant number of elements (+/-1).
+					nextKey := initialElems
+					for range 100000 {
+						for k := range m {
+							delete(m, k)
+							break
+						}
+						m[nextKey] = nextKey
+						nextKey++
+						if len(m) != initialElems {
+							t.Fatal("len(m) should remain constant")
+						}
+					}
+				})
+
+				// The make has 4 allocs (map, directory, table, groups).
+				// Each growth has 2 allocs (table, groups).
+				// We allow two growths if we start full, 1 otherwise.
+				// Fail (somewhat arbitrarily) if there are more than that.
+				allowed := float64(4 + 1*2)
+				if initialElems == capacity {
+					allowed += 2
+				}
+				if allocs > allowed {
+					t.Fatalf("got %v allocations, allowed %v", allocs, allowed)
+				}
+			})
+		}
+	}
+}
diff --git a/src/internal/runtime/maps/runtime.go b/src/internal/runtime/maps/runtime.go
index 3d06f54f4d..c4a3e07041 100644
--- a/src/internal/runtime/maps/runtime.go
+++ b/src/internal/runtime/maps/runtime.go
@@ -6,6 +6,10 @@ package maps
 
 import (
 	"internal/abi"
+	"internal/asan"
+	"internal/msan"
+	"internal/race"
+	"internal/runtime/sys"
 	"unsafe"
 )
 
@@ -28,3 +32,337 @@ func newarray(typ *abi.Type, n int) unsafe.Pointer
 
 //go:linkname newobject
 func newobject(typ *abi.Type) unsafe.Pointer
+
+// Pushed from runtime in order to use runtime.plainError
+//
+//go:linkname errNilAssign
+var errNilAssign error
+
+// Pull from runtime. It is important that this is the exact same copy as the
+// runtime because runtime.mapaccess1_fat compares the returned pointer with
+// &runtime.zeroVal[0].
+// TODO: move zeroVal to internal/abi?
+//
+//go:linkname zeroVal runtime.zeroVal
+var zeroVal [abi.ZeroValSize]byte
+
+// mapaccess1 returns a pointer to h[key]. Never returns nil, instead
+// it will return a reference to the zero object for the elem type if
+// the key is not in the map.
+// NOTE: The returned pointer may keep the whole map live, so don't
+// hold onto it for very long.
+// +//go:linkname runtime_mapaccess1 runtime.mapaccess1 +func runtime_mapaccess1(typ *abi.SwissMapType, m *Map, key unsafe.Pointer) unsafe.Pointer { + if race.Enabled && m != nil { + callerpc := sys.GetCallerPC() + pc := abi.FuncPCABIInternal(runtime_mapaccess1) + race.ReadPC(unsafe.Pointer(m), callerpc, pc) + race.ReadObjectPC(typ.Key, key, callerpc, pc) + } + if msan.Enabled && m != nil { + msan.Read(key, typ.Key.Size_) + } + if asan.Enabled && m != nil { + asan.Read(key, typ.Key.Size_) + } + + if m == nil || m.Used() == 0 { + if err := mapKeyError(typ, key); err != nil { + panic(err) // see issue 23734 + } + return unsafe.Pointer(&zeroVal[0]) + } + + if m.writing != 0 { + fatal("concurrent map read and map write") + } + + hash := typ.Hasher(key, m.seed) + + if m.dirLen <= 0 { + _, elem, ok := m.getWithKeySmall(typ, hash, key) + if !ok { + return unsafe.Pointer(&zeroVal[0]) + } + return elem + } + + // Select table. + idx := m.directoryIndex(hash) + t := m.directoryAt(idx) + + // Probe table. + seq := makeProbeSeq(h1(hash), t.groups.lengthMask) + for ; ; seq = seq.next() { + g := t.groups.group(typ, seq.offset) + + match := g.ctrls().matchH2(h2(hash)) + + for match != 0 { + i := match.first() + + slotKey := g.key(typ, i) + slotKeyOrig := slotKey + if typ.IndirectKey() { + slotKey = *((*unsafe.Pointer)(slotKey)) + } + if typ.Key.Equal(key, slotKey) { + slotElem := unsafe.Pointer(uintptr(slotKeyOrig) + typ.ElemOff) + if typ.IndirectElem() { + slotElem = *((*unsafe.Pointer)(slotElem)) + } + return slotElem + } + match = match.removeFirst() + } + + match = g.ctrls().matchEmpty() + if match != 0 { + // Finding an empty slot means we've reached the end of + // the probe sequence. + return unsafe.Pointer(&zeroVal[0]) + } + } +} + +//go:linkname runtime_mapaccess2 runtime.mapaccess2 +func runtime_mapaccess2(typ *abi.SwissMapType, m *Map, key unsafe.Pointer) (unsafe.Pointer, bool) { + if race.Enabled && m != nil { + callerpc := sys.GetCallerPC() + pc := abi.FuncPCABIInternal(runtime_mapaccess1) + race.ReadPC(unsafe.Pointer(m), callerpc, pc) + race.ReadObjectPC(typ.Key, key, callerpc, pc) + } + if msan.Enabled && m != nil { + msan.Read(key, typ.Key.Size_) + } + if asan.Enabled && m != nil { + asan.Read(key, typ.Key.Size_) + } + + if m == nil || m.Used() == 0 { + if err := mapKeyError(typ, key); err != nil { + panic(err) // see issue 23734 + } + return unsafe.Pointer(&zeroVal[0]), false + } + + if m.writing != 0 { + fatal("concurrent map read and map write") + } + + hash := typ.Hasher(key, m.seed) + + if m.dirLen == 0 { + _, elem, ok := m.getWithKeySmall(typ, hash, key) + if !ok { + return unsafe.Pointer(&zeroVal[0]), false + } + return elem, true + } + + // Select table. + idx := m.directoryIndex(hash) + t := m.directoryAt(idx) + + // Probe table. + seq := makeProbeSeq(h1(hash), t.groups.lengthMask) + for ; ; seq = seq.next() { + g := t.groups.group(typ, seq.offset) + + match := g.ctrls().matchH2(h2(hash)) + + for match != 0 { + i := match.first() + + slotKey := g.key(typ, i) + slotKeyOrig := slotKey + if typ.IndirectKey() { + slotKey = *((*unsafe.Pointer)(slotKey)) + } + if typ.Key.Equal(key, slotKey) { + slotElem := unsafe.Pointer(uintptr(slotKeyOrig) + typ.ElemOff) + if typ.IndirectElem() { + slotElem = *((*unsafe.Pointer)(slotElem)) + } + return slotElem, true + } + match = match.removeFirst() + } + + match = g.ctrls().matchEmpty() + if match != 0 { + // Finding an empty slot means we've reached the end of + // the probe sequence. 
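+			// (A deleted slot would not stop the search: a key that was
+			// displaced past a tombstone must remain findable, so only a
+			// never-used slot proves the key is absent.)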
+ return unsafe.Pointer(&zeroVal[0]), false + } + } +} + +//go:linkname runtime_mapassign runtime.mapassign +func runtime_mapassign(typ *abi.SwissMapType, m *Map, key unsafe.Pointer) unsafe.Pointer { + if m == nil { + panic(errNilAssign) + } + if race.Enabled { + callerpc := sys.GetCallerPC() + pc := abi.FuncPCABIInternal(runtime_mapassign) + race.WritePC(unsafe.Pointer(m), callerpc, pc) + race.ReadObjectPC(typ.Key, key, callerpc, pc) + } + if msan.Enabled { + msan.Read(key, typ.Key.Size_) + } + if asan.Enabled { + asan.Read(key, typ.Key.Size_) + } + if m.writing != 0 { + fatal("concurrent map writes") + } + + hash := typ.Hasher(key, m.seed) + + // Set writing after calling Hasher, since Hasher may panic, in which + // case we have not actually done a write. + m.writing ^= 1 // toggle, see comment on writing + + if m.dirPtr == nil { + m.growToSmall(typ) + } + + if m.dirLen == 0 { + if m.used < abi.SwissMapGroupSlots { + elem := m.putSlotSmall(typ, hash, key) + + if m.writing == 0 { + fatal("concurrent map writes") + } + m.writing ^= 1 + + return elem + } + + // Can't fit another entry, grow to full size map. + m.growToTable(typ) + } + + var slotElem unsafe.Pointer +outer: + for { + // Select table. + idx := m.directoryIndex(hash) + t := m.directoryAt(idx) + + seq := makeProbeSeq(h1(hash), t.groups.lengthMask) + + // As we look for a match, keep track of the first deleted slot + // we find, which we'll use to insert the new entry if + // necessary. + var firstDeletedGroup groupReference + var firstDeletedSlot uintptr + + for ; ; seq = seq.next() { + g := t.groups.group(typ, seq.offset) + match := g.ctrls().matchH2(h2(hash)) + + // Look for an existing slot containing this key. + for match != 0 { + i := match.first() + + slotKey := g.key(typ, i) + slotKeyOrig := slotKey + if typ.IndirectKey() { + slotKey = *((*unsafe.Pointer)(slotKey)) + } + if typ.Key.Equal(key, slotKey) { + if typ.NeedKeyUpdate() { + typedmemmove(typ.Key, slotKey, key) + } + + slotElem = unsafe.Pointer(uintptr(slotKeyOrig) + typ.ElemOff) + if typ.IndirectElem() { + slotElem = *((*unsafe.Pointer)(slotElem)) + } + + t.checkInvariants(typ, m) + break outer + } + match = match.removeFirst() + } + + // No existing slot for this key in this group. Is this the end + // of the probe sequence? + match = g.ctrls().matchEmpty() + if match != 0 { + // Finding an empty slot means we've reached the end of + // the probe sequence. + + var i uintptr + + // If we found a deleted slot along the way, we + // can replace it without consuming growthLeft. + if firstDeletedGroup.data != nil { + g = firstDeletedGroup + i = firstDeletedSlot + t.growthLeft++ // will be decremented below to become a no-op. + } else { + // Otherwise, use the empty slot. + i = match.first() + } + + // If there is room left to grow, just insert the new entry. + if t.growthLeft > 0 { + slotKey := g.key(typ, i) + slotKeyOrig := slotKey + if typ.IndirectKey() { + kmem := newobject(typ.Key) + *(*unsafe.Pointer)(slotKey) = kmem + slotKey = kmem + } + typedmemmove(typ.Key, slotKey, key) + + slotElem = unsafe.Pointer(uintptr(slotKeyOrig) + typ.ElemOff) + if typ.IndirectElem() { + emem := newobject(typ.Elem) + *(*unsafe.Pointer)(slotElem) = emem + slotElem = emem + } + + g.ctrls().set(i, ctrl(h2(hash))) + t.growthLeft-- + t.used++ + m.used++ + + t.checkInvariants(typ, m) + break outer + } + + t.rehash(typ, m) + continue outer + } + + // No empty slots in this group. Check for a deleted + // slot, which we'll use if we don't find a match later + // in the probe sequence. 
+ // + // We only need to remember a single deleted slot. + if firstDeletedGroup.data == nil { + // Since we already checked for empty slots + // above, matches here must be deleted slots. + match = g.ctrls().matchEmptyOrDeleted() + if match != 0 { + firstDeletedGroup = g + firstDeletedSlot = match.first() + } + } + } + } + + if m.writing == 0 { + fatal("concurrent map writes") + } + m.writing ^= 1 + + return slotElem +} diff --git a/src/internal/runtime/maps/runtime_fast32.go b/src/internal/runtime/maps/runtime_fast32.go new file mode 100644 index 0000000000..aece31a2a4 --- /dev/null +++ b/src/internal/runtime/maps/runtime_fast32.go @@ -0,0 +1,473 @@ +// Copyright 2024 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package maps + +import ( + "internal/abi" + "internal/race" + "internal/runtime/sys" + "unsafe" +) + +//go:linkname runtime_mapaccess1_fast32 runtime.mapaccess1_fast32 +func runtime_mapaccess1_fast32(typ *abi.SwissMapType, m *Map, key uint32) unsafe.Pointer { + if race.Enabled && m != nil { + callerpc := sys.GetCallerPC() + pc := abi.FuncPCABIInternal(runtime_mapaccess1_fast32) + race.ReadPC(unsafe.Pointer(m), callerpc, pc) + } + + if m == nil || m.Used() == 0 { + return unsafe.Pointer(&zeroVal[0]) + } + + if m.writing != 0 { + fatal("concurrent map read and map write") + return nil + } + + if m.dirLen == 0 { + g := groupReference{ + data: m.dirPtr, + } + full := g.ctrls().matchFull() + slotKey := g.key(typ, 0) + slotSize := typ.SlotSize + for full != 0 { + if key == *(*uint32)(slotKey) && full.lowestSet() { + slotElem := unsafe.Pointer(uintptr(slotKey) + typ.ElemOff) + return slotElem + } + slotKey = unsafe.Pointer(uintptr(slotKey) + slotSize) + full = full.shiftOutLowest() + } + return unsafe.Pointer(&zeroVal[0]) + } + + k := key + hash := typ.Hasher(abi.NoEscape(unsafe.Pointer(&k)), m.seed) + + // Select table. + idx := m.directoryIndex(hash) + t := m.directoryAt(idx) + + // Probe table. + seq := makeProbeSeq(h1(hash), t.groups.lengthMask) + for ; ; seq = seq.next() { + g := t.groups.group(typ, seq.offset) + + match := g.ctrls().matchH2(h2(hash)) + + for match != 0 { + i := match.first() + + slotKey := g.key(typ, i) + if key == *(*uint32)(slotKey) { + slotElem := unsafe.Pointer(uintptr(slotKey) + typ.ElemOff) + return slotElem + } + match = match.removeFirst() + } + + match = g.ctrls().matchEmpty() + if match != 0 { + // Finding an empty slot means we've reached the end of + // the probe sequence. 
+ return unsafe.Pointer(&zeroVal[0]) + } + } +} + +//go:linkname runtime_mapaccess2_fast32 runtime.mapaccess2_fast32 +func runtime_mapaccess2_fast32(typ *abi.SwissMapType, m *Map, key uint32) (unsafe.Pointer, bool) { + if race.Enabled && m != nil { + callerpc := sys.GetCallerPC() + pc := abi.FuncPCABIInternal(runtime_mapaccess2_fast32) + race.ReadPC(unsafe.Pointer(m), callerpc, pc) + } + + if m == nil || m.Used() == 0 { + return unsafe.Pointer(&zeroVal[0]), false + } + + if m.writing != 0 { + fatal("concurrent map read and map write") + return nil, false + } + + if m.dirLen == 0 { + g := groupReference{ + data: m.dirPtr, + } + full := g.ctrls().matchFull() + slotKey := g.key(typ, 0) + slotSize := typ.SlotSize + for full != 0 { + if key == *(*uint32)(slotKey) && full.lowestSet() { + slotElem := unsafe.Pointer(uintptr(slotKey) + typ.ElemOff) + return slotElem, true + } + slotKey = unsafe.Pointer(uintptr(slotKey) + slotSize) + full = full.shiftOutLowest() + } + return unsafe.Pointer(&zeroVal[0]), false + } + + k := key + hash := typ.Hasher(abi.NoEscape(unsafe.Pointer(&k)), m.seed) + + // Select table. + idx := m.directoryIndex(hash) + t := m.directoryAt(idx) + + // Probe table. + seq := makeProbeSeq(h1(hash), t.groups.lengthMask) + for ; ; seq = seq.next() { + g := t.groups.group(typ, seq.offset) + + match := g.ctrls().matchH2(h2(hash)) + + for match != 0 { + i := match.first() + + slotKey := g.key(typ, i) + if key == *(*uint32)(slotKey) { + slotElem := unsafe.Pointer(uintptr(slotKey) + typ.ElemOff) + return slotElem, true + } + match = match.removeFirst() + } + + match = g.ctrls().matchEmpty() + if match != 0 { + // Finding an empty slot means we've reached the end of + // the probe sequence. + return unsafe.Pointer(&zeroVal[0]), false + } + } +} + +func (m *Map) putSlotSmallFast32(typ *abi.SwissMapType, hash uintptr, key uint32) unsafe.Pointer { + g := groupReference{ + data: m.dirPtr, + } + + match := g.ctrls().matchH2(h2(hash)) + + // Look for an existing slot containing this key. + for match != 0 { + i := match.first() + + slotKey := g.key(typ, i) + if key == *(*uint32)(slotKey) { + slotElem := g.elem(typ, i) + return slotElem + } + match = match.removeFirst() + } + + // There can't be deleted slots, small maps can't have them + // (see deleteSmall). Use matchEmptyOrDeleted as it is a bit + // more efficient than matchEmpty. + match = g.ctrls().matchEmptyOrDeleted() + if match == 0 { + fatal("small map with no empty slot (concurrent map writes?)") + } + + i := match.first() + + slotKey := g.key(typ, i) + *(*uint32)(slotKey) = key + + slotElem := g.elem(typ, i) + + g.ctrls().set(i, ctrl(h2(hash))) + m.used++ + + return slotElem +} + +//go:linkname runtime_mapassign_fast32 runtime.mapassign_fast32 +func runtime_mapassign_fast32(typ *abi.SwissMapType, m *Map, key uint32) unsafe.Pointer { + if m == nil { + panic(errNilAssign) + } + if race.Enabled { + callerpc := sys.GetCallerPC() + pc := abi.FuncPCABIInternal(runtime_mapassign_fast32) + race.WritePC(unsafe.Pointer(m), callerpc, pc) + } + if m.writing != 0 { + fatal("concurrent map writes") + } + + k := key + hash := typ.Hasher(abi.NoEscape(unsafe.Pointer(&k)), m.seed) + + // Set writing after calling Hasher, since Hasher may panic, in which + // case we have not actually done a write. 
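+	// (writing is a one-bit entry/exit marker rather than a lock: it is
+	// flipped on here, flipped off on every return path, and any mismatch
+	// observed by a reader or by the exit checks is reported via fatal.)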
+ m.writing ^= 1 // toggle, see comment on writing + + if m.dirPtr == nil { + m.growToSmall(typ) + } + + if m.dirLen == 0 { + if m.used < abi.SwissMapGroupSlots { + elem := m.putSlotSmallFast32(typ, hash, key) + + if m.writing == 0 { + fatal("concurrent map writes") + } + m.writing ^= 1 + + return elem + } + + // Can't fit another entry, grow to full size map. + m.growToTable(typ) + } + + var slotElem unsafe.Pointer +outer: + for { + // Select table. + idx := m.directoryIndex(hash) + t := m.directoryAt(idx) + + seq := makeProbeSeq(h1(hash), t.groups.lengthMask) + + // As we look for a match, keep track of the first deleted slot + // we find, which we'll use to insert the new entry if + // necessary. + var firstDeletedGroup groupReference + var firstDeletedSlot uintptr + + for ; ; seq = seq.next() { + g := t.groups.group(typ, seq.offset) + match := g.ctrls().matchH2(h2(hash)) + + // Look for an existing slot containing this key. + for match != 0 { + i := match.first() + + slotKey := g.key(typ, i) + if key == *(*uint32)(slotKey) { + slotElem = g.elem(typ, i) + + t.checkInvariants(typ, m) + break outer + } + match = match.removeFirst() + } + + // No existing slot for this key in this group. Is this the end + // of the probe sequence? + match = g.ctrls().matchEmptyOrDeleted() + if match == 0 { + continue // nothing but filled slots. Keep probing. + } + i := match.first() + if g.ctrls().get(i) == ctrlDeleted { + // There are some deleted slots. Remember + // the first one, and keep probing. + if firstDeletedGroup.data == nil { + firstDeletedGroup = g + firstDeletedSlot = i + } + continue + } + // We've found an empty slot, which means we've reached the end of + // the probe sequence. + + // If we found a deleted slot along the way, we can + // replace it without consuming growthLeft. + if firstDeletedGroup.data != nil { + g = firstDeletedGroup + i = firstDeletedSlot + t.growthLeft++ // will be decremented below to become a no-op. + } + + // If we have no space left, first try to remove some tombstones. + if t.growthLeft == 0 { + t.pruneTombstones(typ, m) + } + + // If there is room left to grow, just insert the new entry. + if t.growthLeft > 0 { + slotKey := g.key(typ, i) + *(*uint32)(slotKey) = key + + slotElem = g.elem(typ, i) + + g.ctrls().set(i, ctrl(h2(hash))) + t.growthLeft-- + t.used++ + m.used++ + + t.checkInvariants(typ, m) + break outer + } + + t.rehash(typ, m) + continue outer + } + } + + if m.writing == 0 { + fatal("concurrent map writes") + } + m.writing ^= 1 + + return slotElem +} + +// Key is a 32-bit pointer (only called on 32-bit GOARCH). This source is identical to fast64ptr. +// +// TODO(prattmic): With some compiler refactoring we could avoid duplication of this function. +// +//go:linkname runtime_mapassign_fast32ptr runtime.mapassign_fast32ptr +func runtime_mapassign_fast32ptr(typ *abi.SwissMapType, m *Map, key unsafe.Pointer) unsafe.Pointer { + if m == nil { + panic(errNilAssign) + } + if race.Enabled { + callerpc := sys.GetCallerPC() + pc := abi.FuncPCABIInternal(runtime_mapassign_fast32ptr) + race.WritePC(unsafe.Pointer(m), callerpc, pc) + } + if m.writing != 0 { + fatal("concurrent map writes") + } + + k := key + hash := typ.Hasher(abi.NoEscape(unsafe.Pointer(&k)), m.seed) + + // Set writing after calling Hasher, since Hasher may panic, in which + // case we have not actually done a write. 
+ m.writing ^= 1 // toggle, see comment on writing + + if m.dirPtr == nil { + m.growToSmall(typ) + } + + if m.dirLen == 0 { + if m.used < abi.SwissMapGroupSlots { + elem := m.putSlotSmallFastPtr(typ, hash, key) + + if m.writing == 0 { + fatal("concurrent map writes") + } + m.writing ^= 1 + + return elem + } + + // Can't fit another entry, grow to full size map. + m.growToTable(typ) + } + + var slotElem unsafe.Pointer +outer: + for { + // Select table. + idx := m.directoryIndex(hash) + t := m.directoryAt(idx) + + seq := makeProbeSeq(h1(hash), t.groups.lengthMask) + + // As we look for a match, keep track of the first deleted slot we + // find, which we'll use to insert the new entry if necessary. + var firstDeletedGroup groupReference + var firstDeletedSlot uintptr + + for ; ; seq = seq.next() { + g := t.groups.group(typ, seq.offset) + match := g.ctrls().matchH2(h2(hash)) + + // Look for an existing slot containing this key. + for match != 0 { + i := match.first() + + slotKey := g.key(typ, i) + if key == *(*unsafe.Pointer)(slotKey) { + slotElem = g.elem(typ, i) + + t.checkInvariants(typ, m) + break outer + } + match = match.removeFirst() + } + + // No existing slot for this key in this group. Is this the end + // of the probe sequence? + match = g.ctrls().matchEmptyOrDeleted() + if match == 0 { + continue // nothing but filled slots. Keep probing. + } + i := match.first() + if g.ctrls().get(i) == ctrlDeleted { + // There are some deleted slots. Remember + // the first one, and keep probing. + if firstDeletedGroup.data == nil { + firstDeletedGroup = g + firstDeletedSlot = i + } + continue + } + // We've found an empty slot, which means we've reached the end of + // the probe sequence. + + // If we found a deleted slot along the way, we can + // replace it without consuming growthLeft. + if firstDeletedGroup.data != nil { + g = firstDeletedGroup + i = firstDeletedSlot + t.growthLeft++ // will be decremented below to become a no-op. + } + + // If there is room left to grow, just insert the new entry. + if t.growthLeft > 0 { + slotKey := g.key(typ, i) + *(*unsafe.Pointer)(slotKey) = key + + slotElem = g.elem(typ, i) + + g.ctrls().set(i, ctrl(h2(hash))) + t.growthLeft-- + t.used++ + m.used++ + + t.checkInvariants(typ, m) + break outer + } + + t.rehash(typ, m) + continue outer + } + } + + if m.writing == 0 { + fatal("concurrent map writes") + } + m.writing ^= 1 + + return slotElem +} + +//go:linkname runtime_mapdelete_fast32 runtime.mapdelete_fast32 +func runtime_mapdelete_fast32(typ *abi.SwissMapType, m *Map, key uint32) { + if race.Enabled { + callerpc := sys.GetCallerPC() + pc := abi.FuncPCABIInternal(runtime_mapdelete_fast32) + race.WritePC(unsafe.Pointer(m), callerpc, pc) + } + + if m == nil || m.Used() == 0 { + return + } + + m.Delete(typ, abi.NoEscape(unsafe.Pointer(&key))) +} diff --git a/src/internal/runtime/maps/runtime_fast32_swiss.go b/src/internal/runtime/maps/runtime_fast32_swiss.go deleted file mode 100644 index d57d042527..0000000000 --- a/src/internal/runtime/maps/runtime_fast32_swiss.go +++ /dev/null @@ -1,475 +0,0 @@ -// Copyright 2024 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
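Both the new runtime_fast32.go above and the goexperiment-gated copy deleted below lean on ctrls().matchH2 and matchEmpty, which test all eight control bytes of a group in one machine word. A portable sketch of the underlying SWAR idea (illustrative constants and helper; not the runtime's exact code):

package main

import "fmt"

const (
	lsb = 0x0101010101010101 // lowest bit of each byte lane
	msb = 0x8080808080808080 // highest bit of each byte lane
)

// matchByte sets the high bit of every byte lane of group equal to b.
// A lane adjacent to a true match can yield a false positive; that is
// acceptable in a hash table because every candidate slot is confirmed
// with a full key comparison, as the probe loops in this patch do.
func matchByte(group uint64, b uint8) uint64 {
	x := group ^ (lsb * uint64(b)) // matching lanes become zero
	return (x - lsb) &^ x & msb    // classic zero-byte detector
}

func main() {
	group := uint64(0x8877665544332211)            // lane 2 holds 0x33
	fmt.Printf("%#018x\n", matchByte(group, 0x33)) // 0x0000000000800000
}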
- -//go:build goexperiment.swissmap - -package maps - -import ( - "internal/abi" - "internal/race" - "internal/runtime/sys" - "unsafe" -) - -//go:linkname runtime_mapaccess1_fast32 runtime.mapaccess1_fast32 -func runtime_mapaccess1_fast32(typ *abi.SwissMapType, m *Map, key uint32) unsafe.Pointer { - if race.Enabled && m != nil { - callerpc := sys.GetCallerPC() - pc := abi.FuncPCABIInternal(runtime_mapaccess1_fast32) - race.ReadPC(unsafe.Pointer(m), callerpc, pc) - } - - if m == nil || m.Used() == 0 { - return unsafe.Pointer(&zeroVal[0]) - } - - if m.writing != 0 { - fatal("concurrent map read and map write") - return nil - } - - if m.dirLen == 0 { - g := groupReference{ - data: m.dirPtr, - } - full := g.ctrls().matchFull() - slotKey := g.key(typ, 0) - slotSize := typ.SlotSize - for full != 0 { - if key == *(*uint32)(slotKey) && full.lowestSet() { - slotElem := unsafe.Pointer(uintptr(slotKey) + typ.ElemOff) - return slotElem - } - slotKey = unsafe.Pointer(uintptr(slotKey) + slotSize) - full = full.shiftOutLowest() - } - return unsafe.Pointer(&zeroVal[0]) - } - - k := key - hash := typ.Hasher(abi.NoEscape(unsafe.Pointer(&k)), m.seed) - - // Select table. - idx := m.directoryIndex(hash) - t := m.directoryAt(idx) - - // Probe table. - seq := makeProbeSeq(h1(hash), t.groups.lengthMask) - for ; ; seq = seq.next() { - g := t.groups.group(typ, seq.offset) - - match := g.ctrls().matchH2(h2(hash)) - - for match != 0 { - i := match.first() - - slotKey := g.key(typ, i) - if key == *(*uint32)(slotKey) { - slotElem := unsafe.Pointer(uintptr(slotKey) + typ.ElemOff) - return slotElem - } - match = match.removeFirst() - } - - match = g.ctrls().matchEmpty() - if match != 0 { - // Finding an empty slot means we've reached the end of - // the probe sequence. - return unsafe.Pointer(&zeroVal[0]) - } - } -} - -//go:linkname runtime_mapaccess2_fast32 runtime.mapaccess2_fast32 -func runtime_mapaccess2_fast32(typ *abi.SwissMapType, m *Map, key uint32) (unsafe.Pointer, bool) { - if race.Enabled && m != nil { - callerpc := sys.GetCallerPC() - pc := abi.FuncPCABIInternal(runtime_mapaccess2_fast32) - race.ReadPC(unsafe.Pointer(m), callerpc, pc) - } - - if m == nil || m.Used() == 0 { - return unsafe.Pointer(&zeroVal[0]), false - } - - if m.writing != 0 { - fatal("concurrent map read and map write") - return nil, false - } - - if m.dirLen == 0 { - g := groupReference{ - data: m.dirPtr, - } - full := g.ctrls().matchFull() - slotKey := g.key(typ, 0) - slotSize := typ.SlotSize - for full != 0 { - if key == *(*uint32)(slotKey) && full.lowestSet() { - slotElem := unsafe.Pointer(uintptr(slotKey) + typ.ElemOff) - return slotElem, true - } - slotKey = unsafe.Pointer(uintptr(slotKey) + slotSize) - full = full.shiftOutLowest() - } - return unsafe.Pointer(&zeroVal[0]), false - } - - k := key - hash := typ.Hasher(abi.NoEscape(unsafe.Pointer(&k)), m.seed) - - // Select table. - idx := m.directoryIndex(hash) - t := m.directoryAt(idx) - - // Probe table. - seq := makeProbeSeq(h1(hash), t.groups.lengthMask) - for ; ; seq = seq.next() { - g := t.groups.group(typ, seq.offset) - - match := g.ctrls().matchH2(h2(hash)) - - for match != 0 { - i := match.first() - - slotKey := g.key(typ, i) - if key == *(*uint32)(slotKey) { - slotElem := unsafe.Pointer(uintptr(slotKey) + typ.ElemOff) - return slotElem, true - } - match = match.removeFirst() - } - - match = g.ctrls().matchEmpty() - if match != 0 { - // Finding an empty slot means we've reached the end of - // the probe sequence. 
- return unsafe.Pointer(&zeroVal[0]), false - } - } -} - -func (m *Map) putSlotSmallFast32(typ *abi.SwissMapType, hash uintptr, key uint32) unsafe.Pointer { - g := groupReference{ - data: m.dirPtr, - } - - match := g.ctrls().matchH2(h2(hash)) - - // Look for an existing slot containing this key. - for match != 0 { - i := match.first() - - slotKey := g.key(typ, i) - if key == *(*uint32)(slotKey) { - slotElem := g.elem(typ, i) - return slotElem - } - match = match.removeFirst() - } - - // There can't be deleted slots, small maps can't have them - // (see deleteSmall). Use matchEmptyOrDeleted as it is a bit - // more efficient than matchEmpty. - match = g.ctrls().matchEmptyOrDeleted() - if match == 0 { - fatal("small map with no empty slot (concurrent map writes?)") - } - - i := match.first() - - slotKey := g.key(typ, i) - *(*uint32)(slotKey) = key - - slotElem := g.elem(typ, i) - - g.ctrls().set(i, ctrl(h2(hash))) - m.used++ - - return slotElem -} - -//go:linkname runtime_mapassign_fast32 runtime.mapassign_fast32 -func runtime_mapassign_fast32(typ *abi.SwissMapType, m *Map, key uint32) unsafe.Pointer { - if m == nil { - panic(errNilAssign) - } - if race.Enabled { - callerpc := sys.GetCallerPC() - pc := abi.FuncPCABIInternal(runtime_mapassign_fast32) - race.WritePC(unsafe.Pointer(m), callerpc, pc) - } - if m.writing != 0 { - fatal("concurrent map writes") - } - - k := key - hash := typ.Hasher(abi.NoEscape(unsafe.Pointer(&k)), m.seed) - - // Set writing after calling Hasher, since Hasher may panic, in which - // case we have not actually done a write. - m.writing ^= 1 // toggle, see comment on writing - - if m.dirPtr == nil { - m.growToSmall(typ) - } - - if m.dirLen == 0 { - if m.used < abi.SwissMapGroupSlots { - elem := m.putSlotSmallFast32(typ, hash, key) - - if m.writing == 0 { - fatal("concurrent map writes") - } - m.writing ^= 1 - - return elem - } - - // Can't fit another entry, grow to full size map. - m.growToTable(typ) - } - - var slotElem unsafe.Pointer -outer: - for { - // Select table. - idx := m.directoryIndex(hash) - t := m.directoryAt(idx) - - seq := makeProbeSeq(h1(hash), t.groups.lengthMask) - - // As we look for a match, keep track of the first deleted slot - // we find, which we'll use to insert the new entry if - // necessary. - var firstDeletedGroup groupReference - var firstDeletedSlot uintptr - - for ; ; seq = seq.next() { - g := t.groups.group(typ, seq.offset) - match := g.ctrls().matchH2(h2(hash)) - - // Look for an existing slot containing this key. - for match != 0 { - i := match.first() - - slotKey := g.key(typ, i) - if key == *(*uint32)(slotKey) { - slotElem = g.elem(typ, i) - - t.checkInvariants(typ, m) - break outer - } - match = match.removeFirst() - } - - // No existing slot for this key in this group. Is this the end - // of the probe sequence? - match = g.ctrls().matchEmptyOrDeleted() - if match == 0 { - continue // nothing but filled slots. Keep probing. - } - i := match.first() - if g.ctrls().get(i) == ctrlDeleted { - // There are some deleted slots. Remember - // the first one, and keep probing. - if firstDeletedGroup.data == nil { - firstDeletedGroup = g - firstDeletedSlot = i - } - continue - } - // We've found an empty slot, which means we've reached the end of - // the probe sequence. - - // If we found a deleted slot along the way, we can - // replace it without consuming growthLeft. - if firstDeletedGroup.data != nil { - g = firstDeletedGroup - i = firstDeletedSlot - t.growthLeft++ // will be decremented below to become a no-op. 
- } - - // If we have no space left, first try to remove some tombstones. - if t.growthLeft == 0 { - t.pruneTombstones(typ, m) - } - - // If there is room left to grow, just insert the new entry. - if t.growthLeft > 0 { - slotKey := g.key(typ, i) - *(*uint32)(slotKey) = key - - slotElem = g.elem(typ, i) - - g.ctrls().set(i, ctrl(h2(hash))) - t.growthLeft-- - t.used++ - m.used++ - - t.checkInvariants(typ, m) - break outer - } - - t.rehash(typ, m) - continue outer - } - } - - if m.writing == 0 { - fatal("concurrent map writes") - } - m.writing ^= 1 - - return slotElem -} - -// Key is a 32-bit pointer (only called on 32-bit GOARCH). This source is identical to fast64ptr. -// -// TODO(prattmic): With some compiler refactoring we could avoid duplication of this function. -// -//go:linkname runtime_mapassign_fast32ptr runtime.mapassign_fast32ptr -func runtime_mapassign_fast32ptr(typ *abi.SwissMapType, m *Map, key unsafe.Pointer) unsafe.Pointer { - if m == nil { - panic(errNilAssign) - } - if race.Enabled { - callerpc := sys.GetCallerPC() - pc := abi.FuncPCABIInternal(runtime_mapassign_fast32ptr) - race.WritePC(unsafe.Pointer(m), callerpc, pc) - } - if m.writing != 0 { - fatal("concurrent map writes") - } - - k := key - hash := typ.Hasher(abi.NoEscape(unsafe.Pointer(&k)), m.seed) - - // Set writing after calling Hasher, since Hasher may panic, in which - // case we have not actually done a write. - m.writing ^= 1 // toggle, see comment on writing - - if m.dirPtr == nil { - m.growToSmall(typ) - } - - if m.dirLen == 0 { - if m.used < abi.SwissMapGroupSlots { - elem := m.putSlotSmallFastPtr(typ, hash, key) - - if m.writing == 0 { - fatal("concurrent map writes") - } - m.writing ^= 1 - - return elem - } - - // Can't fit another entry, grow to full size map. - m.growToTable(typ) - } - - var slotElem unsafe.Pointer -outer: - for { - // Select table. - idx := m.directoryIndex(hash) - t := m.directoryAt(idx) - - seq := makeProbeSeq(h1(hash), t.groups.lengthMask) - - // As we look for a match, keep track of the first deleted slot we - // find, which we'll use to insert the new entry if necessary. - var firstDeletedGroup groupReference - var firstDeletedSlot uintptr - - for ; ; seq = seq.next() { - g := t.groups.group(typ, seq.offset) - match := g.ctrls().matchH2(h2(hash)) - - // Look for an existing slot containing this key. - for match != 0 { - i := match.first() - - slotKey := g.key(typ, i) - if key == *(*unsafe.Pointer)(slotKey) { - slotElem = g.elem(typ, i) - - t.checkInvariants(typ, m) - break outer - } - match = match.removeFirst() - } - - // No existing slot for this key in this group. Is this the end - // of the probe sequence? - match = g.ctrls().matchEmptyOrDeleted() - if match == 0 { - continue // nothing but filled slots. Keep probing. - } - i := match.first() - if g.ctrls().get(i) == ctrlDeleted { - // There are some deleted slots. Remember - // the first one, and keep probing. - if firstDeletedGroup.data == nil { - firstDeletedGroup = g - firstDeletedSlot = i - } - continue - } - // We've found an empty slot, which means we've reached the end of - // the probe sequence. - - // If we found a deleted slot along the way, we can - // replace it without consuming growthLeft. - if firstDeletedGroup.data != nil { - g = firstDeletedGroup - i = firstDeletedSlot - t.growthLeft++ // will be decremented below to become a no-op. - } - - // If there is room left to grow, just insert the new entry. 
- if t.growthLeft > 0 { - slotKey := g.key(typ, i) - *(*unsafe.Pointer)(slotKey) = key - - slotElem = g.elem(typ, i) - - g.ctrls().set(i, ctrl(h2(hash))) - t.growthLeft-- - t.used++ - m.used++ - - t.checkInvariants(typ, m) - break outer - } - - t.rehash(typ, m) - continue outer - } - } - - if m.writing == 0 { - fatal("concurrent map writes") - } - m.writing ^= 1 - - return slotElem -} - -//go:linkname runtime_mapdelete_fast32 runtime.mapdelete_fast32 -func runtime_mapdelete_fast32(typ *abi.SwissMapType, m *Map, key uint32) { - if race.Enabled { - callerpc := sys.GetCallerPC() - pc := abi.FuncPCABIInternal(runtime_mapdelete_fast32) - race.WritePC(unsafe.Pointer(m), callerpc, pc) - } - - if m == nil || m.Used() == 0 { - return - } - - m.Delete(typ, abi.NoEscape(unsafe.Pointer(&key))) -} diff --git a/src/internal/runtime/maps/runtime_fast64.go b/src/internal/runtime/maps/runtime_fast64.go new file mode 100644 index 0000000000..1e06d17837 --- /dev/null +++ b/src/internal/runtime/maps/runtime_fast64.go @@ -0,0 +1,512 @@ +// Copyright 2024 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package maps + +import ( + "internal/abi" + "internal/race" + "internal/runtime/sys" + "unsafe" +) + +//go:linkname runtime_mapaccess1_fast64 runtime.mapaccess1_fast64 +func runtime_mapaccess1_fast64(typ *abi.SwissMapType, m *Map, key uint64) unsafe.Pointer { + if race.Enabled && m != nil { + callerpc := sys.GetCallerPC() + pc := abi.FuncPCABIInternal(runtime_mapaccess1_fast64) + race.ReadPC(unsafe.Pointer(m), callerpc, pc) + } + + if m == nil || m.Used() == 0 { + return unsafe.Pointer(&zeroVal[0]) + } + + if m.writing != 0 { + fatal("concurrent map read and map write") + return nil + } + + if m.dirLen == 0 { + g := groupReference{ + data: m.dirPtr, + } + full := g.ctrls().matchFull() + slotKey := g.key(typ, 0) + slotSize := typ.SlotSize + for full != 0 { + if key == *(*uint64)(slotKey) && full.lowestSet() { + slotElem := unsafe.Pointer(uintptr(slotKey) + 8) + return slotElem + } + slotKey = unsafe.Pointer(uintptr(slotKey) + slotSize) + full = full.shiftOutLowest() + } + return unsafe.Pointer(&zeroVal[0]) + } + + k := key + hash := typ.Hasher(abi.NoEscape(unsafe.Pointer(&k)), m.seed) + + // Select table. + idx := m.directoryIndex(hash) + t := m.directoryAt(idx) + + // Probe table. + seq := makeProbeSeq(h1(hash), t.groups.lengthMask) + for ; ; seq = seq.next() { + g := t.groups.group(typ, seq.offset) + + match := g.ctrls().matchH2(h2(hash)) + + for match != 0 { + i := match.first() + + slotKey := g.key(typ, i) + if key == *(*uint64)(slotKey) { + slotElem := unsafe.Pointer(uintptr(slotKey) + 8) + return slotElem + } + match = match.removeFirst() + } + + match = g.ctrls().matchEmpty() + if match != 0 { + // Finding an empty slot means we've reached the end of + // the probe sequence. 
+ return unsafe.Pointer(&zeroVal[0]) + } + } +} + +//go:linkname runtime_mapaccess2_fast64 runtime.mapaccess2_fast64 +func runtime_mapaccess2_fast64(typ *abi.SwissMapType, m *Map, key uint64) (unsafe.Pointer, bool) { + if race.Enabled && m != nil { + callerpc := sys.GetCallerPC() + pc := abi.FuncPCABIInternal(runtime_mapaccess2_fast64) + race.ReadPC(unsafe.Pointer(m), callerpc, pc) + } + + if m == nil || m.Used() == 0 { + return unsafe.Pointer(&zeroVal[0]), false + } + + if m.writing != 0 { + fatal("concurrent map read and map write") + return nil, false + } + + if m.dirLen == 0 { + g := groupReference{ + data: m.dirPtr, + } + full := g.ctrls().matchFull() + slotKey := g.key(typ, 0) + slotSize := typ.SlotSize + for full != 0 { + if key == *(*uint64)(slotKey) && full.lowestSet() { + slotElem := unsafe.Pointer(uintptr(slotKey) + 8) + return slotElem, true + } + slotKey = unsafe.Pointer(uintptr(slotKey) + slotSize) + full = full.shiftOutLowest() + } + return unsafe.Pointer(&zeroVal[0]), false + } + + k := key + hash := typ.Hasher(abi.NoEscape(unsafe.Pointer(&k)), m.seed) + + // Select table. + idx := m.directoryIndex(hash) + t := m.directoryAt(idx) + + // Probe table. + seq := makeProbeSeq(h1(hash), t.groups.lengthMask) + for ; ; seq = seq.next() { + g := t.groups.group(typ, seq.offset) + + match := g.ctrls().matchH2(h2(hash)) + + for match != 0 { + i := match.first() + + slotKey := g.key(typ, i) + if key == *(*uint64)(slotKey) { + slotElem := unsafe.Pointer(uintptr(slotKey) + 8) + return slotElem, true + } + match = match.removeFirst() + } + + match = g.ctrls().matchEmpty() + if match != 0 { + // Finding an empty slot means we've reached the end of + // the probe sequence. + return unsafe.Pointer(&zeroVal[0]), false + } + } +} + +func (m *Map) putSlotSmallFast64(typ *abi.SwissMapType, hash uintptr, key uint64) unsafe.Pointer { + g := groupReference{ + data: m.dirPtr, + } + + match := g.ctrls().matchH2(h2(hash)) + + // Look for an existing slot containing this key. + for match != 0 { + i := match.first() + + slotKey := g.key(typ, i) + if key == *(*uint64)(slotKey) { + slotElem := g.elem(typ, i) + return slotElem + } + match = match.removeFirst() + } + + // There can't be deleted slots, small maps can't have them + // (see deleteSmall). Use matchEmptyOrDeleted as it is a bit + // more efficient than matchEmpty. + match = g.ctrls().matchEmptyOrDeleted() + if match == 0 { + fatal("small map with no empty slot (concurrent map writes?)") + } + + i := match.first() + + slotKey := g.key(typ, i) + *(*uint64)(slotKey) = key + + slotElem := g.elem(typ, i) + + g.ctrls().set(i, ctrl(h2(hash))) + m.used++ + + return slotElem +} + +//go:linkname runtime_mapassign_fast64 runtime.mapassign_fast64 +func runtime_mapassign_fast64(typ *abi.SwissMapType, m *Map, key uint64) unsafe.Pointer { + if m == nil { + panic(errNilAssign) + } + if race.Enabled { + callerpc := sys.GetCallerPC() + pc := abi.FuncPCABIInternal(runtime_mapassign_fast64) + race.WritePC(unsafe.Pointer(m), callerpc, pc) + } + if m.writing != 0 { + fatal("concurrent map writes") + } + + k := key + hash := typ.Hasher(abi.NoEscape(unsafe.Pointer(&k)), m.seed) + + // Set writing after calling Hasher, since Hasher may panic, in which + // case we have not actually done a write. 
+ m.writing ^= 1 // toggle, see comment on writing + + if m.dirPtr == nil { + m.growToSmall(typ) + } + + if m.dirLen == 0 { + if m.used < abi.SwissMapGroupSlots { + elem := m.putSlotSmallFast64(typ, hash, key) + + if m.writing == 0 { + fatal("concurrent map writes") + } + m.writing ^= 1 + + return elem + } + + // Can't fit another entry, grow to full size map. + m.growToTable(typ) + } + + var slotElem unsafe.Pointer +outer: + for { + // Select table. + idx := m.directoryIndex(hash) + t := m.directoryAt(idx) + + seq := makeProbeSeq(h1(hash), t.groups.lengthMask) + + // As we look for a match, keep track of the first deleted slot + // we find, which we'll use to insert the new entry if + // necessary. + var firstDeletedGroup groupReference + var firstDeletedSlot uintptr + + for ; ; seq = seq.next() { + g := t.groups.group(typ, seq.offset) + match := g.ctrls().matchH2(h2(hash)) + + // Look for an existing slot containing this key. + for match != 0 { + i := match.first() + + slotKey := g.key(typ, i) + if key == *(*uint64)(slotKey) { + slotElem = g.elem(typ, i) + + t.checkInvariants(typ, m) + break outer + } + match = match.removeFirst() + } + + // No existing slot for this key in this group. Is this the end + // of the probe sequence? + match = g.ctrls().matchEmptyOrDeleted() + if match == 0 { + continue // nothing but filled slots. Keep probing. + } + i := match.first() + if g.ctrls().get(i) == ctrlDeleted { + // There are some deleted slots. Remember + // the first one, and keep probing. + if firstDeletedGroup.data == nil { + firstDeletedGroup = g + firstDeletedSlot = i + } + continue + } + // We've found an empty slot, which means we've reached the end of + // the probe sequence. + + // If we found a deleted slot along the way, we can + // replace it without consuming growthLeft. + if firstDeletedGroup.data != nil { + g = firstDeletedGroup + i = firstDeletedSlot + t.growthLeft++ // will be decremented below to become a no-op. + } + + // If we have no space left, first try to remove some tombstones. + if t.growthLeft == 0 { + t.pruneTombstones(typ, m) + } + + // If there is room left to grow, just insert the new entry. + if t.growthLeft > 0 { + slotKey := g.key(typ, i) + *(*uint64)(slotKey) = key + + slotElem = g.elem(typ, i) + + g.ctrls().set(i, ctrl(h2(hash))) + t.growthLeft-- + t.used++ + m.used++ + + t.checkInvariants(typ, m) + break outer + } + + t.rehash(typ, m) + continue outer + } + } + + if m.writing == 0 { + fatal("concurrent map writes") + } + m.writing ^= 1 + + return slotElem +} + +func (m *Map) putSlotSmallFastPtr(typ *abi.SwissMapType, hash uintptr, key unsafe.Pointer) unsafe.Pointer { + g := groupReference{ + data: m.dirPtr, + } + + match := g.ctrls().matchH2(h2(hash)) + + // Look for an existing slot containing this key. + for match != 0 { + i := match.first() + + slotKey := g.key(typ, i) + if key == *(*unsafe.Pointer)(slotKey) { + slotElem := g.elem(typ, i) + return slotElem + } + match = match.removeFirst() + } + + // There can't be deleted slots, small maps can't have them + // (see deleteSmall). Use matchEmptyOrDeleted as it is a bit + // more efficient than matchEmpty. + match = g.ctrls().matchEmptyOrDeleted() + if match == 0 { + fatal("small map with no empty slot (concurrent map writes?)") + } + + i := match.first() + + slotKey := g.key(typ, i) + *(*unsafe.Pointer)(slotKey) = key + + slotElem := g.elem(typ, i) + + g.ctrls().set(i, ctrl(h2(hash))) + m.used++ + + return slotElem +} + +// Key is a 64-bit pointer (only called on 64-bit GOARCH). 
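+// It exists separately from fast64 so that storing the key is a typed
+// pointer write, with the write barrier the garbage collector needs,
+// rather than a raw uint64 store.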
+// +//go:linkname runtime_mapassign_fast64ptr runtime.mapassign_fast64ptr +func runtime_mapassign_fast64ptr(typ *abi.SwissMapType, m *Map, key unsafe.Pointer) unsafe.Pointer { + if m == nil { + panic(errNilAssign) + } + if race.Enabled { + callerpc := sys.GetCallerPC() + pc := abi.FuncPCABIInternal(runtime_mapassign_fast64ptr) + race.WritePC(unsafe.Pointer(m), callerpc, pc) + } + if m.writing != 0 { + fatal("concurrent map writes") + } + + k := key + hash := typ.Hasher(abi.NoEscape(unsafe.Pointer(&k)), m.seed) + + // Set writing after calling Hasher, since Hasher may panic, in which + // case we have not actually done a write. + m.writing ^= 1 // toggle, see comment on writing + + if m.dirPtr == nil { + m.growToSmall(typ) + } + + if m.dirLen == 0 { + if m.used < abi.SwissMapGroupSlots { + elem := m.putSlotSmallFastPtr(typ, hash, key) + + if m.writing == 0 { + fatal("concurrent map writes") + } + m.writing ^= 1 + + return elem + } + + // Can't fit another entry, grow to full size map. + m.growToTable(typ) + } + + var slotElem unsafe.Pointer +outer: + for { + // Select table. + idx := m.directoryIndex(hash) + t := m.directoryAt(idx) + + seq := makeProbeSeq(h1(hash), t.groups.lengthMask) + + // As we look for a match, keep track of the first deleted slot + // we find, which we'll use to insert the new entry if + // necessary. + var firstDeletedGroup groupReference + var firstDeletedSlot uintptr + + for ; ; seq = seq.next() { + g := t.groups.group(typ, seq.offset) + match := g.ctrls().matchH2(h2(hash)) + + // Look for an existing slot containing this key. + for match != 0 { + i := match.first() + + slotKey := g.key(typ, i) + if key == *(*unsafe.Pointer)(slotKey) { + slotElem = g.elem(typ, i) + + t.checkInvariants(typ, m) + break outer + } + match = match.removeFirst() + } + + // No existing slot for this key in this group. Is this the end + // of the probe sequence? + match = g.ctrls().matchEmptyOrDeleted() + if match == 0 { + continue // nothing but filled slots. Keep probing. + } + i := match.first() + if g.ctrls().get(i) == ctrlDeleted { + // There are some deleted slots. Remember + // the first one, and keep probing. + if firstDeletedGroup.data == nil { + firstDeletedGroup = g + firstDeletedSlot = i + } + continue + } + // We've found an empty slot, which means we've reached the end of + // the probe sequence. + + // If we found a deleted slot along the way, we can + // replace it without consuming growthLeft. + if firstDeletedGroup.data != nil { + g = firstDeletedGroup + i = firstDeletedSlot + t.growthLeft++ // will be decremented below to become a no-op. + } + + // If there is room left to grow, just insert the new entry. 
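+			// (growthLeft counts the empty slots this table may still
+			// consume before its load factor forces a rehash. Reusing a
+			// deleted slot pre-incremented it above, so the decrement
+			// below nets out to a no-op.)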
+ if t.growthLeft > 0 { + slotKey := g.key(typ, i) + *(*unsafe.Pointer)(slotKey) = key + + slotElem = g.elem(typ, i) + + g.ctrls().set(i, ctrl(h2(hash))) + t.growthLeft-- + t.used++ + m.used++ + + t.checkInvariants(typ, m) + break outer + } + + t.rehash(typ, m) + continue outer + } + } + + if m.writing == 0 { + fatal("concurrent map writes") + } + m.writing ^= 1 + + return slotElem +} + +//go:linkname runtime_mapdelete_fast64 runtime.mapdelete_fast64 +func runtime_mapdelete_fast64(typ *abi.SwissMapType, m *Map, key uint64) { + if race.Enabled { + callerpc := sys.GetCallerPC() + pc := abi.FuncPCABIInternal(runtime_mapdelete_fast64) + race.WritePC(unsafe.Pointer(m), callerpc, pc) + } + + if m == nil || m.Used() == 0 { + return + } + + m.Delete(typ, abi.NoEscape(unsafe.Pointer(&key))) +} diff --git a/src/internal/runtime/maps/runtime_fast64_swiss.go b/src/internal/runtime/maps/runtime_fast64_swiss.go deleted file mode 100644 index 461cb1d318..0000000000 --- a/src/internal/runtime/maps/runtime_fast64_swiss.go +++ /dev/null @@ -1,514 +0,0 @@ -// Copyright 2024 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build goexperiment.swissmap - -package maps - -import ( - "internal/abi" - "internal/race" - "internal/runtime/sys" - "unsafe" -) - -//go:linkname runtime_mapaccess1_fast64 runtime.mapaccess1_fast64 -func runtime_mapaccess1_fast64(typ *abi.SwissMapType, m *Map, key uint64) unsafe.Pointer { - if race.Enabled && m != nil { - callerpc := sys.GetCallerPC() - pc := abi.FuncPCABIInternal(runtime_mapaccess1_fast64) - race.ReadPC(unsafe.Pointer(m), callerpc, pc) - } - - if m == nil || m.Used() == 0 { - return unsafe.Pointer(&zeroVal[0]) - } - - if m.writing != 0 { - fatal("concurrent map read and map write") - return nil - } - - if m.dirLen == 0 { - g := groupReference{ - data: m.dirPtr, - } - full := g.ctrls().matchFull() - slotKey := g.key(typ, 0) - slotSize := typ.SlotSize - for full != 0 { - if key == *(*uint64)(slotKey) && full.lowestSet() { - slotElem := unsafe.Pointer(uintptr(slotKey) + 8) - return slotElem - } - slotKey = unsafe.Pointer(uintptr(slotKey) + slotSize) - full = full.shiftOutLowest() - } - return unsafe.Pointer(&zeroVal[0]) - } - - k := key - hash := typ.Hasher(abi.NoEscape(unsafe.Pointer(&k)), m.seed) - - // Select table. - idx := m.directoryIndex(hash) - t := m.directoryAt(idx) - - // Probe table. - seq := makeProbeSeq(h1(hash), t.groups.lengthMask) - for ; ; seq = seq.next() { - g := t.groups.group(typ, seq.offset) - - match := g.ctrls().matchH2(h2(hash)) - - for match != 0 { - i := match.first() - - slotKey := g.key(typ, i) - if key == *(*uint64)(slotKey) { - slotElem := unsafe.Pointer(uintptr(slotKey) + 8) - return slotElem - } - match = match.removeFirst() - } - - match = g.ctrls().matchEmpty() - if match != 0 { - // Finding an empty slot means we've reached the end of - // the probe sequence. 
- return unsafe.Pointer(&zeroVal[0]) - } - } -} - -//go:linkname runtime_mapaccess2_fast64 runtime.mapaccess2_fast64 -func runtime_mapaccess2_fast64(typ *abi.SwissMapType, m *Map, key uint64) (unsafe.Pointer, bool) { - if race.Enabled && m != nil { - callerpc := sys.GetCallerPC() - pc := abi.FuncPCABIInternal(runtime_mapaccess2_fast64) - race.ReadPC(unsafe.Pointer(m), callerpc, pc) - } - - if m == nil || m.Used() == 0 { - return unsafe.Pointer(&zeroVal[0]), false - } - - if m.writing != 0 { - fatal("concurrent map read and map write") - return nil, false - } - - if m.dirLen == 0 { - g := groupReference{ - data: m.dirPtr, - } - full := g.ctrls().matchFull() - slotKey := g.key(typ, 0) - slotSize := typ.SlotSize - for full != 0 { - if key == *(*uint64)(slotKey) && full.lowestSet() { - slotElem := unsafe.Pointer(uintptr(slotKey) + 8) - return slotElem, true - } - slotKey = unsafe.Pointer(uintptr(slotKey) + slotSize) - full = full.shiftOutLowest() - } - return unsafe.Pointer(&zeroVal[0]), false - } - - k := key - hash := typ.Hasher(abi.NoEscape(unsafe.Pointer(&k)), m.seed) - - // Select table. - idx := m.directoryIndex(hash) - t := m.directoryAt(idx) - - // Probe table. - seq := makeProbeSeq(h1(hash), t.groups.lengthMask) - for ; ; seq = seq.next() { - g := t.groups.group(typ, seq.offset) - - match := g.ctrls().matchH2(h2(hash)) - - for match != 0 { - i := match.first() - - slotKey := g.key(typ, i) - if key == *(*uint64)(slotKey) { - slotElem := unsafe.Pointer(uintptr(slotKey) + 8) - return slotElem, true - } - match = match.removeFirst() - } - - match = g.ctrls().matchEmpty() - if match != 0 { - // Finding an empty slot means we've reached the end of - // the probe sequence. - return unsafe.Pointer(&zeroVal[0]), false - } - } -} - -func (m *Map) putSlotSmallFast64(typ *abi.SwissMapType, hash uintptr, key uint64) unsafe.Pointer { - g := groupReference{ - data: m.dirPtr, - } - - match := g.ctrls().matchH2(h2(hash)) - - // Look for an existing slot containing this key. - for match != 0 { - i := match.first() - - slotKey := g.key(typ, i) - if key == *(*uint64)(slotKey) { - slotElem := g.elem(typ, i) - return slotElem - } - match = match.removeFirst() - } - - // There can't be deleted slots, small maps can't have them - // (see deleteSmall). Use matchEmptyOrDeleted as it is a bit - // more efficient than matchEmpty. - match = g.ctrls().matchEmptyOrDeleted() - if match == 0 { - fatal("small map with no empty slot (concurrent map writes?)") - } - - i := match.first() - - slotKey := g.key(typ, i) - *(*uint64)(slotKey) = key - - slotElem := g.elem(typ, i) - - g.ctrls().set(i, ctrl(h2(hash))) - m.used++ - - return slotElem -} - -//go:linkname runtime_mapassign_fast64 runtime.mapassign_fast64 -func runtime_mapassign_fast64(typ *abi.SwissMapType, m *Map, key uint64) unsafe.Pointer { - if m == nil { - panic(errNilAssign) - } - if race.Enabled { - callerpc := sys.GetCallerPC() - pc := abi.FuncPCABIInternal(runtime_mapassign_fast64) - race.WritePC(unsafe.Pointer(m), callerpc, pc) - } - if m.writing != 0 { - fatal("concurrent map writes") - } - - k := key - hash := typ.Hasher(abi.NoEscape(unsafe.Pointer(&k)), m.seed) - - // Set writing after calling Hasher, since Hasher may panic, in which - // case we have not actually done a write. 
- m.writing ^= 1 // toggle, see comment on writing - - if m.dirPtr == nil { - m.growToSmall(typ) - } - - if m.dirLen == 0 { - if m.used < abi.SwissMapGroupSlots { - elem := m.putSlotSmallFast64(typ, hash, key) - - if m.writing == 0 { - fatal("concurrent map writes") - } - m.writing ^= 1 - - return elem - } - - // Can't fit another entry, grow to full size map. - m.growToTable(typ) - } - - var slotElem unsafe.Pointer -outer: - for { - // Select table. - idx := m.directoryIndex(hash) - t := m.directoryAt(idx) - - seq := makeProbeSeq(h1(hash), t.groups.lengthMask) - - // As we look for a match, keep track of the first deleted slot - // we find, which we'll use to insert the new entry if - // necessary. - var firstDeletedGroup groupReference - var firstDeletedSlot uintptr - - for ; ; seq = seq.next() { - g := t.groups.group(typ, seq.offset) - match := g.ctrls().matchH2(h2(hash)) - - // Look for an existing slot containing this key. - for match != 0 { - i := match.first() - - slotKey := g.key(typ, i) - if key == *(*uint64)(slotKey) { - slotElem = g.elem(typ, i) - - t.checkInvariants(typ, m) - break outer - } - match = match.removeFirst() - } - - // No existing slot for this key in this group. Is this the end - // of the probe sequence? - match = g.ctrls().matchEmptyOrDeleted() - if match == 0 { - continue // nothing but filled slots. Keep probing. - } - i := match.first() - if g.ctrls().get(i) == ctrlDeleted { - // There are some deleted slots. Remember - // the first one, and keep probing. - if firstDeletedGroup.data == nil { - firstDeletedGroup = g - firstDeletedSlot = i - } - continue - } - // We've found an empty slot, which means we've reached the end of - // the probe sequence. - - // If we found a deleted slot along the way, we can - // replace it without consuming growthLeft. - if firstDeletedGroup.data != nil { - g = firstDeletedGroup - i = firstDeletedSlot - t.growthLeft++ // will be decremented below to become a no-op. - } - - // If we have no space left, first try to remove some tombstones. - if t.growthLeft == 0 { - t.pruneTombstones(typ, m) - } - - // If there is room left to grow, just insert the new entry. - if t.growthLeft > 0 { - slotKey := g.key(typ, i) - *(*uint64)(slotKey) = key - - slotElem = g.elem(typ, i) - - g.ctrls().set(i, ctrl(h2(hash))) - t.growthLeft-- - t.used++ - m.used++ - - t.checkInvariants(typ, m) - break outer - } - - t.rehash(typ, m) - continue outer - } - } - - if m.writing == 0 { - fatal("concurrent map writes") - } - m.writing ^= 1 - - return slotElem -} - -func (m *Map) putSlotSmallFastPtr(typ *abi.SwissMapType, hash uintptr, key unsafe.Pointer) unsafe.Pointer { - g := groupReference{ - data: m.dirPtr, - } - - match := g.ctrls().matchH2(h2(hash)) - - // Look for an existing slot containing this key. - for match != 0 { - i := match.first() - - slotKey := g.key(typ, i) - if key == *(*unsafe.Pointer)(slotKey) { - slotElem := g.elem(typ, i) - return slotElem - } - match = match.removeFirst() - } - - // There can't be deleted slots, small maps can't have them - // (see deleteSmall). Use matchEmptyOrDeleted as it is a bit - // more efficient than matchEmpty. - match = g.ctrls().matchEmptyOrDeleted() - if match == 0 { - fatal("small map with no empty slot (concurrent map writes?)") - } - - i := match.first() - - slotKey := g.key(typ, i) - *(*unsafe.Pointer)(slotKey) = key - - slotElem := g.elem(typ, i) - - g.ctrls().set(i, ctrl(h2(hash))) - m.used++ - - return slotElem -} - -// Key is a 64-bit pointer (only called on 64-bit GOARCH). 
-// -//go:linkname runtime_mapassign_fast64ptr runtime.mapassign_fast64ptr -func runtime_mapassign_fast64ptr(typ *abi.SwissMapType, m *Map, key unsafe.Pointer) unsafe.Pointer { - if m == nil { - panic(errNilAssign) - } - if race.Enabled { - callerpc := sys.GetCallerPC() - pc := abi.FuncPCABIInternal(runtime_mapassign_fast64ptr) - race.WritePC(unsafe.Pointer(m), callerpc, pc) - } - if m.writing != 0 { - fatal("concurrent map writes") - } - - k := key - hash := typ.Hasher(abi.NoEscape(unsafe.Pointer(&k)), m.seed) - - // Set writing after calling Hasher, since Hasher may panic, in which - // case we have not actually done a write. - m.writing ^= 1 // toggle, see comment on writing - - if m.dirPtr == nil { - m.growToSmall(typ) - } - - if m.dirLen == 0 { - if m.used < abi.SwissMapGroupSlots { - elem := m.putSlotSmallFastPtr(typ, hash, key) - - if m.writing == 0 { - fatal("concurrent map writes") - } - m.writing ^= 1 - - return elem - } - - // Can't fit another entry, grow to full size map. - m.growToTable(typ) - } - - var slotElem unsafe.Pointer -outer: - for { - // Select table. - idx := m.directoryIndex(hash) - t := m.directoryAt(idx) - - seq := makeProbeSeq(h1(hash), t.groups.lengthMask) - - // As we look for a match, keep track of the first deleted slot - // we find, which we'll use to insert the new entry if - // necessary. - var firstDeletedGroup groupReference - var firstDeletedSlot uintptr - - for ; ; seq = seq.next() { - g := t.groups.group(typ, seq.offset) - match := g.ctrls().matchH2(h2(hash)) - - // Look for an existing slot containing this key. - for match != 0 { - i := match.first() - - slotKey := g.key(typ, i) - if key == *(*unsafe.Pointer)(slotKey) { - slotElem = g.elem(typ, i) - - t.checkInvariants(typ, m) - break outer - } - match = match.removeFirst() - } - - // No existing slot for this key in this group. Is this the end - // of the probe sequence? - match = g.ctrls().matchEmptyOrDeleted() - if match == 0 { - continue // nothing but filled slots. Keep probing. - } - i := match.first() - if g.ctrls().get(i) == ctrlDeleted { - // There are some deleted slots. Remember - // the first one, and keep probing. - if firstDeletedGroup.data == nil { - firstDeletedGroup = g - firstDeletedSlot = i - } - continue - } - // We've found an empty slot, which means we've reached the end of - // the probe sequence. - - // If we found a deleted slot along the way, we can - // replace it without consuming growthLeft. - if firstDeletedGroup.data != nil { - g = firstDeletedGroup - i = firstDeletedSlot - t.growthLeft++ // will be decremented below to become a no-op. - } - - // If there is room left to grow, just insert the new entry. 
- if t.growthLeft > 0 { - slotKey := g.key(typ, i) - *(*unsafe.Pointer)(slotKey) = key - - slotElem = g.elem(typ, i) - - g.ctrls().set(i, ctrl(h2(hash))) - t.growthLeft-- - t.used++ - m.used++ - - t.checkInvariants(typ, m) - break outer - } - - t.rehash(typ, m) - continue outer - } - } - - if m.writing == 0 { - fatal("concurrent map writes") - } - m.writing ^= 1 - - return slotElem -} - -//go:linkname runtime_mapdelete_fast64 runtime.mapdelete_fast64 -func runtime_mapdelete_fast64(typ *abi.SwissMapType, m *Map, key uint64) { - if race.Enabled { - callerpc := sys.GetCallerPC() - pc := abi.FuncPCABIInternal(runtime_mapdelete_fast64) - race.WritePC(unsafe.Pointer(m), callerpc, pc) - } - - if m == nil || m.Used() == 0 { - return - } - - m.Delete(typ, abi.NoEscape(unsafe.Pointer(&key))) -} diff --git a/src/internal/runtime/maps/runtime_faststr.go b/src/internal/runtime/maps/runtime_faststr.go new file mode 100644 index 0000000000..8c6944e6b8 --- /dev/null +++ b/src/internal/runtime/maps/runtime_faststr.go @@ -0,0 +1,411 @@ +// Copyright 2024 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package maps + +import ( + "internal/abi" + "internal/goarch" + "internal/race" + "internal/runtime/sys" + "unsafe" +) + +func (m *Map) getWithoutKeySmallFastStr(typ *abi.SwissMapType, key string) unsafe.Pointer { + g := groupReference{ + data: m.dirPtr, + } + + ctrls := *g.ctrls() + slotKey := g.key(typ, 0) + slotSize := typ.SlotSize + + // The 64 threshold was chosen based on performance of BenchmarkMapStringKeysEight, + // where there are 8 keys to check, all of which don't quick-match the lookup key. + // In that case, we can save hashing the lookup key. That savings is worth this extra code + // for strings that are long enough that hashing is expensive. + if len(key) > 64 { + // String hashing and equality might be expensive. Do a quick check first. + j := abi.SwissMapGroupSlots + for i := range abi.SwissMapGroupSlots { + if ctrls&(1<<7) == 0 && longStringQuickEqualityTest(key, *(*string)(slotKey)) { + if j < abi.SwissMapGroupSlots { + // 2 strings both passed the quick equality test. + // Break out of this loop and do it the slow way. + goto dohash + } + j = i + } + slotKey = unsafe.Pointer(uintptr(slotKey) + slotSize) + ctrls >>= 8 + } + if j == abi.SwissMapGroupSlots { + // No slot passed the quick test. + return nil + } + // There's exactly one slot that passed the quick test. Do the single expensive comparison. + slotKey = g.key(typ, uintptr(j)) + if key == *(*string)(slotKey) { + return unsafe.Pointer(uintptr(slotKey) + 2*goarch.PtrSize) + } + return nil + } + +dohash: + // This path will cost 1 hash and 1+ε comparisons. + hash := typ.Hasher(abi.NoEscape(unsafe.Pointer(&key)), m.seed) + h2 := uint8(h2(hash)) + ctrls = *g.ctrls() + slotKey = g.key(typ, 0) + + for range abi.SwissMapGroupSlots { + if uint8(ctrls) == h2 && key == *(*string)(slotKey) { + return unsafe.Pointer(uintptr(slotKey) + 2*goarch.PtrSize) + } + slotKey = unsafe.Pointer(uintptr(slotKey) + slotSize) + ctrls >>= 8 + } + return nil +} + +// Returns true if a and b might be equal. +// Returns false if a and b are definitely not equal. +// Requires len(a)>=8. +func longStringQuickEqualityTest(a, b string) bool { + if len(a) != len(b) { + return false + } + x, y := stringPtr(a), stringPtr(b) + // Check first 8 bytes. + if *(*[8]byte)(x) != *(*[8]byte)(y) { + return false + } + // Check last 8 bytes. 
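+ // The first and last 8-byte windows may overlap when len(a) < 16;
+ // that is harmless, since matching windows remain a necessary (but
+ // not sufficient) condition for equality.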
+ x = unsafe.Pointer(uintptr(x) + uintptr(len(a)) - 8) + y = unsafe.Pointer(uintptr(y) + uintptr(len(a)) - 8) + if *(*[8]byte)(x) != *(*[8]byte)(y) { + return false + } + return true +} +func stringPtr(s string) unsafe.Pointer { + type stringStruct struct { + ptr unsafe.Pointer + len int + } + return (*stringStruct)(unsafe.Pointer(&s)).ptr +} + +//go:linkname runtime_mapaccess1_faststr runtime.mapaccess1_faststr +func runtime_mapaccess1_faststr(typ *abi.SwissMapType, m *Map, key string) unsafe.Pointer { + if race.Enabled && m != nil { + callerpc := sys.GetCallerPC() + pc := abi.FuncPCABIInternal(runtime_mapaccess1_faststr) + race.ReadPC(unsafe.Pointer(m), callerpc, pc) + } + + if m == nil || m.Used() == 0 { + return unsafe.Pointer(&zeroVal[0]) + } + + if m.writing != 0 { + fatal("concurrent map read and map write") + return nil + } + + if m.dirLen <= 0 { + elem := m.getWithoutKeySmallFastStr(typ, key) + if elem == nil { + return unsafe.Pointer(&zeroVal[0]) + } + return elem + } + + k := key + hash := typ.Hasher(abi.NoEscape(unsafe.Pointer(&k)), m.seed) + + // Select table. + idx := m.directoryIndex(hash) + t := m.directoryAt(idx) + + // Probe table. + seq := makeProbeSeq(h1(hash), t.groups.lengthMask) + for ; ; seq = seq.next() { + g := t.groups.group(typ, seq.offset) + + match := g.ctrls().matchH2(h2(hash)) + + for match != 0 { + i := match.first() + + slotKey := g.key(typ, i) + if key == *(*string)(slotKey) { + slotElem := unsafe.Pointer(uintptr(slotKey) + 2*goarch.PtrSize) + return slotElem + } + match = match.removeFirst() + } + + match = g.ctrls().matchEmpty() + if match != 0 { + // Finding an empty slot means we've reached the end of + // the probe sequence. + return unsafe.Pointer(&zeroVal[0]) + } + } +} + +//go:linkname runtime_mapaccess2_faststr runtime.mapaccess2_faststr +func runtime_mapaccess2_faststr(typ *abi.SwissMapType, m *Map, key string) (unsafe.Pointer, bool) { + if race.Enabled && m != nil { + callerpc := sys.GetCallerPC() + pc := abi.FuncPCABIInternal(runtime_mapaccess2_faststr) + race.ReadPC(unsafe.Pointer(m), callerpc, pc) + } + + if m == nil || m.Used() == 0 { + return unsafe.Pointer(&zeroVal[0]), false + } + + if m.writing != 0 { + fatal("concurrent map read and map write") + return nil, false + } + + if m.dirLen <= 0 { + elem := m.getWithoutKeySmallFastStr(typ, key) + if elem == nil { + return unsafe.Pointer(&zeroVal[0]), false + } + return elem, true + } + + k := key + hash := typ.Hasher(abi.NoEscape(unsafe.Pointer(&k)), m.seed) + + // Select table. + idx := m.directoryIndex(hash) + t := m.directoryAt(idx) + + // Probe table. + seq := makeProbeSeq(h1(hash), t.groups.lengthMask) + for ; ; seq = seq.next() { + g := t.groups.group(typ, seq.offset) + + match := g.ctrls().matchH2(h2(hash)) + + for match != 0 { + i := match.first() + + slotKey := g.key(typ, i) + if key == *(*string)(slotKey) { + slotElem := unsafe.Pointer(uintptr(slotKey) + 2*goarch.PtrSize) + return slotElem, true + } + match = match.removeFirst() + } + + match = g.ctrls().matchEmpty() + if match != 0 { + // Finding an empty slot means we've reached the end of + // the probe sequence. + return unsafe.Pointer(&zeroVal[0]), false + } + } +} + +func (m *Map) putSlotSmallFastStr(typ *abi.SwissMapType, hash uintptr, key string) unsafe.Pointer { + g := groupReference{ + data: m.dirPtr, + } + + match := g.ctrls().matchH2(h2(hash)) + + // Look for an existing slot containing this key. 
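+ // matchH2 returns a bitset of slots whose control byte matches
+ // h2(hash); each candidate still needs a full string comparison
+ // to rule out h2 collisions.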
+ for match != 0 { + i := match.first() + + slotKey := g.key(typ, i) + if key == *(*string)(slotKey) { + // Key needs update, as the backing storage may differ. + *(*string)(slotKey) = key + slotElem := g.elem(typ, i) + return slotElem + } + match = match.removeFirst() + } + + // There can't be deleted slots, small maps can't have them + // (see deleteSmall). Use matchEmptyOrDeleted as it is a bit + // more efficient than matchEmpty. + match = g.ctrls().matchEmptyOrDeleted() + if match == 0 { + fatal("small map with no empty slot (concurrent map writes?)") + } + + i := match.first() + + slotKey := g.key(typ, i) + *(*string)(slotKey) = key + + slotElem := g.elem(typ, i) + + g.ctrls().set(i, ctrl(h2(hash))) + m.used++ + + return slotElem +} + +//go:linkname runtime_mapassign_faststr runtime.mapassign_faststr +func runtime_mapassign_faststr(typ *abi.SwissMapType, m *Map, key string) unsafe.Pointer { + if m == nil { + panic(errNilAssign) + } + if race.Enabled { + callerpc := sys.GetCallerPC() + pc := abi.FuncPCABIInternal(runtime_mapassign_faststr) + race.WritePC(unsafe.Pointer(m), callerpc, pc) + } + if m.writing != 0 { + fatal("concurrent map writes") + } + + k := key + hash := typ.Hasher(abi.NoEscape(unsafe.Pointer(&k)), m.seed) + + // Set writing after calling Hasher, since Hasher may panic, in which + // case we have not actually done a write. + m.writing ^= 1 // toggle, see comment on writing + + if m.dirPtr == nil { + m.growToSmall(typ) + } + + if m.dirLen == 0 { + if m.used < abi.SwissMapGroupSlots { + elem := m.putSlotSmallFastStr(typ, hash, key) + + if m.writing == 0 { + fatal("concurrent map writes") + } + m.writing ^= 1 + + return elem + } + + // Can't fit another entry, grow to full size map. + m.growToTable(typ) + } + + var slotElem unsafe.Pointer +outer: + for { + // Select table. + idx := m.directoryIndex(hash) + t := m.directoryAt(idx) + + seq := makeProbeSeq(h1(hash), t.groups.lengthMask) + + // As we look for a match, keep track of the first deleted slot + // we find, which we'll use to insert the new entry if + // necessary. + var firstDeletedGroup groupReference + var firstDeletedSlot uintptr + + for ; ; seq = seq.next() { + g := t.groups.group(typ, seq.offset) + match := g.ctrls().matchH2(h2(hash)) + + // Look for an existing slot containing this key. + for match != 0 { + i := match.first() + + slotKey := g.key(typ, i) + if key == *(*string)(slotKey) { + // Key needs update, as the backing + // storage may differ. + *(*string)(slotKey) = key + slotElem = g.elem(typ, i) + + t.checkInvariants(typ, m) + break outer + } + match = match.removeFirst() + } + + // No existing slot for this key in this group. Is this the end + // of the probe sequence? + match = g.ctrls().matchEmptyOrDeleted() + if match == 0 { + continue // nothing but filled slots. Keep probing. + } + i := match.first() + if g.ctrls().get(i) == ctrlDeleted { + // There are some deleted slots. Remember + // the first one, and keep probing. + if firstDeletedGroup.data == nil { + firstDeletedGroup = g + firstDeletedSlot = i + } + continue + } + // We've found an empty slot, which means we've reached the end of + // the probe sequence. + + // If we found a deleted slot along the way, we can + // replace it without consuming growthLeft. + if firstDeletedGroup.data != nil { + g = firstDeletedGroup + i = firstDeletedSlot + t.growthLeft++ // will be decremented below to become a no-op. + } + + // If we have no space left, first try to remove some tombstones. 
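+ // Tombstones (ctrlDeleted) consume capacity without holding
+ // entries; reclaiming them can avoid a rehash when the table
+ // still has usable room.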
+ if t.growthLeft == 0 { + t.pruneTombstones(typ, m) + } + + // If there is room left to grow, just insert the new entry. + if t.growthLeft > 0 { + slotKey := g.key(typ, i) + *(*string)(slotKey) = key + + slotElem = g.elem(typ, i) + + g.ctrls().set(i, ctrl(h2(hash))) + t.growthLeft-- + t.used++ + m.used++ + + t.checkInvariants(typ, m) + break outer + } + + t.rehash(typ, m) + continue outer + } + } + + if m.writing == 0 { + fatal("concurrent map writes") + } + m.writing ^= 1 + + return slotElem +} + +//go:linkname runtime_mapdelete_faststr runtime.mapdelete_faststr +func runtime_mapdelete_faststr(typ *abi.SwissMapType, m *Map, key string) { + if race.Enabled { + callerpc := sys.GetCallerPC() + pc := abi.FuncPCABIInternal(runtime_mapdelete_faststr) + race.WritePC(unsafe.Pointer(m), callerpc, pc) + } + + if m == nil || m.Used() == 0 { + return + } + + m.Delete(typ, abi.NoEscape(unsafe.Pointer(&key))) +} diff --git a/src/internal/runtime/maps/runtime_faststr_swiss.go b/src/internal/runtime/maps/runtime_faststr_swiss.go deleted file mode 100644 index 0d7b02e20c..0000000000 --- a/src/internal/runtime/maps/runtime_faststr_swiss.go +++ /dev/null @@ -1,413 +0,0 @@ -// Copyright 2024 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build goexperiment.swissmap - -package maps - -import ( - "internal/abi" - "internal/goarch" - "internal/race" - "internal/runtime/sys" - "unsafe" -) - -func (m *Map) getWithoutKeySmallFastStr(typ *abi.SwissMapType, key string) unsafe.Pointer { - g := groupReference{ - data: m.dirPtr, - } - - ctrls := *g.ctrls() - slotKey := g.key(typ, 0) - slotSize := typ.SlotSize - - // The 64 threshold was chosen based on performance of BenchmarkMapStringKeysEight, - // where there are 8 keys to check, all of which don't quick-match the lookup key. - // In that case, we can save hashing the lookup key. That savings is worth this extra code - // for strings that are long enough that hashing is expensive. - if len(key) > 64 { - // String hashing and equality might be expensive. Do a quick check first. - j := abi.SwissMapGroupSlots - for i := range abi.SwissMapGroupSlots { - if ctrls&(1<<7) == 0 && longStringQuickEqualityTest(key, *(*string)(slotKey)) { - if j < abi.SwissMapGroupSlots { - // 2 strings both passed the quick equality test. - // Break out of this loop and do it the slow way. - goto dohash - } - j = i - } - slotKey = unsafe.Pointer(uintptr(slotKey) + slotSize) - ctrls >>= 8 - } - if j == abi.SwissMapGroupSlots { - // No slot passed the quick test. - return nil - } - // There's exactly one slot that passed the quick test. Do the single expensive comparison. - slotKey = g.key(typ, uintptr(j)) - if key == *(*string)(slotKey) { - return unsafe.Pointer(uintptr(slotKey) + 2*goarch.PtrSize) - } - return nil - } - -dohash: - // This path will cost 1 hash and 1+ε comparisons. - hash := typ.Hasher(abi.NoEscape(unsafe.Pointer(&key)), m.seed) - h2 := uint8(h2(hash)) - ctrls = *g.ctrls() - slotKey = g.key(typ, 0) - - for range abi.SwissMapGroupSlots { - if uint8(ctrls) == h2 && key == *(*string)(slotKey) { - return unsafe.Pointer(uintptr(slotKey) + 2*goarch.PtrSize) - } - slotKey = unsafe.Pointer(uintptr(slotKey) + slotSize) - ctrls >>= 8 - } - return nil -} - -// Returns true if a and b might be equal. -// Returns false if a and b are definitely not equal. -// Requires len(a)>=8. 
-func longStringQuickEqualityTest(a, b string) bool { - if len(a) != len(b) { - return false - } - x, y := stringPtr(a), stringPtr(b) - // Check first 8 bytes. - if *(*[8]byte)(x) != *(*[8]byte)(y) { - return false - } - // Check last 8 bytes. - x = unsafe.Pointer(uintptr(x) + uintptr(len(a)) - 8) - y = unsafe.Pointer(uintptr(y) + uintptr(len(a)) - 8) - if *(*[8]byte)(x) != *(*[8]byte)(y) { - return false - } - return true -} -func stringPtr(s string) unsafe.Pointer { - type stringStruct struct { - ptr unsafe.Pointer - len int - } - return (*stringStruct)(unsafe.Pointer(&s)).ptr -} - -//go:linkname runtime_mapaccess1_faststr runtime.mapaccess1_faststr -func runtime_mapaccess1_faststr(typ *abi.SwissMapType, m *Map, key string) unsafe.Pointer { - if race.Enabled && m != nil { - callerpc := sys.GetCallerPC() - pc := abi.FuncPCABIInternal(runtime_mapaccess1_faststr) - race.ReadPC(unsafe.Pointer(m), callerpc, pc) - } - - if m == nil || m.Used() == 0 { - return unsafe.Pointer(&zeroVal[0]) - } - - if m.writing != 0 { - fatal("concurrent map read and map write") - return nil - } - - if m.dirLen <= 0 { - elem := m.getWithoutKeySmallFastStr(typ, key) - if elem == nil { - return unsafe.Pointer(&zeroVal[0]) - } - return elem - } - - k := key - hash := typ.Hasher(abi.NoEscape(unsafe.Pointer(&k)), m.seed) - - // Select table. - idx := m.directoryIndex(hash) - t := m.directoryAt(idx) - - // Probe table. - seq := makeProbeSeq(h1(hash), t.groups.lengthMask) - for ; ; seq = seq.next() { - g := t.groups.group(typ, seq.offset) - - match := g.ctrls().matchH2(h2(hash)) - - for match != 0 { - i := match.first() - - slotKey := g.key(typ, i) - if key == *(*string)(slotKey) { - slotElem := unsafe.Pointer(uintptr(slotKey) + 2*goarch.PtrSize) - return slotElem - } - match = match.removeFirst() - } - - match = g.ctrls().matchEmpty() - if match != 0 { - // Finding an empty slot means we've reached the end of - // the probe sequence. - return unsafe.Pointer(&zeroVal[0]) - } - } -} - -//go:linkname runtime_mapaccess2_faststr runtime.mapaccess2_faststr -func runtime_mapaccess2_faststr(typ *abi.SwissMapType, m *Map, key string) (unsafe.Pointer, bool) { - if race.Enabled && m != nil { - callerpc := sys.GetCallerPC() - pc := abi.FuncPCABIInternal(runtime_mapaccess2_faststr) - race.ReadPC(unsafe.Pointer(m), callerpc, pc) - } - - if m == nil || m.Used() == 0 { - return unsafe.Pointer(&zeroVal[0]), false - } - - if m.writing != 0 { - fatal("concurrent map read and map write") - return nil, false - } - - if m.dirLen <= 0 { - elem := m.getWithoutKeySmallFastStr(typ, key) - if elem == nil { - return unsafe.Pointer(&zeroVal[0]), false - } - return elem, true - } - - k := key - hash := typ.Hasher(abi.NoEscape(unsafe.Pointer(&k)), m.seed) - - // Select table. - idx := m.directoryIndex(hash) - t := m.directoryAt(idx) - - // Probe table. - seq := makeProbeSeq(h1(hash), t.groups.lengthMask) - for ; ; seq = seq.next() { - g := t.groups.group(typ, seq.offset) - - match := g.ctrls().matchH2(h2(hash)) - - for match != 0 { - i := match.first() - - slotKey := g.key(typ, i) - if key == *(*string)(slotKey) { - slotElem := unsafe.Pointer(uintptr(slotKey) + 2*goarch.PtrSize) - return slotElem, true - } - match = match.removeFirst() - } - - match = g.ctrls().matchEmpty() - if match != 0 { - // Finding an empty slot means we've reached the end of - // the probe sequence. 
- return unsafe.Pointer(&zeroVal[0]), false - } - } -} - -func (m *Map) putSlotSmallFastStr(typ *abi.SwissMapType, hash uintptr, key string) unsafe.Pointer { - g := groupReference{ - data: m.dirPtr, - } - - match := g.ctrls().matchH2(h2(hash)) - - // Look for an existing slot containing this key. - for match != 0 { - i := match.first() - - slotKey := g.key(typ, i) - if key == *(*string)(slotKey) { - // Key needs update, as the backing storage may differ. - *(*string)(slotKey) = key - slotElem := g.elem(typ, i) - return slotElem - } - match = match.removeFirst() - } - - // There can't be deleted slots, small maps can't have them - // (see deleteSmall). Use matchEmptyOrDeleted as it is a bit - // more efficient than matchEmpty. - match = g.ctrls().matchEmptyOrDeleted() - if match == 0 { - fatal("small map with no empty slot (concurrent map writes?)") - } - - i := match.first() - - slotKey := g.key(typ, i) - *(*string)(slotKey) = key - - slotElem := g.elem(typ, i) - - g.ctrls().set(i, ctrl(h2(hash))) - m.used++ - - return slotElem -} - -//go:linkname runtime_mapassign_faststr runtime.mapassign_faststr -func runtime_mapassign_faststr(typ *abi.SwissMapType, m *Map, key string) unsafe.Pointer { - if m == nil { - panic(errNilAssign) - } - if race.Enabled { - callerpc := sys.GetCallerPC() - pc := abi.FuncPCABIInternal(runtime_mapassign_faststr) - race.WritePC(unsafe.Pointer(m), callerpc, pc) - } - if m.writing != 0 { - fatal("concurrent map writes") - } - - k := key - hash := typ.Hasher(abi.NoEscape(unsafe.Pointer(&k)), m.seed) - - // Set writing after calling Hasher, since Hasher may panic, in which - // case we have not actually done a write. - m.writing ^= 1 // toggle, see comment on writing - - if m.dirPtr == nil { - m.growToSmall(typ) - } - - if m.dirLen == 0 { - if m.used < abi.SwissMapGroupSlots { - elem := m.putSlotSmallFastStr(typ, hash, key) - - if m.writing == 0 { - fatal("concurrent map writes") - } - m.writing ^= 1 - - return elem - } - - // Can't fit another entry, grow to full size map. - m.growToTable(typ) - } - - var slotElem unsafe.Pointer -outer: - for { - // Select table. - idx := m.directoryIndex(hash) - t := m.directoryAt(idx) - - seq := makeProbeSeq(h1(hash), t.groups.lengthMask) - - // As we look for a match, keep track of the first deleted slot - // we find, which we'll use to insert the new entry if - // necessary. - var firstDeletedGroup groupReference - var firstDeletedSlot uintptr - - for ; ; seq = seq.next() { - g := t.groups.group(typ, seq.offset) - match := g.ctrls().matchH2(h2(hash)) - - // Look for an existing slot containing this key. - for match != 0 { - i := match.first() - - slotKey := g.key(typ, i) - if key == *(*string)(slotKey) { - // Key needs update, as the backing - // storage may differ. - *(*string)(slotKey) = key - slotElem = g.elem(typ, i) - - t.checkInvariants(typ, m) - break outer - } - match = match.removeFirst() - } - - // No existing slot for this key in this group. Is this the end - // of the probe sequence? - match = g.ctrls().matchEmptyOrDeleted() - if match == 0 { - continue // nothing but filled slots. Keep probing. - } - i := match.first() - if g.ctrls().get(i) == ctrlDeleted { - // There are some deleted slots. Remember - // the first one, and keep probing. - if firstDeletedGroup.data == nil { - firstDeletedGroup = g - firstDeletedSlot = i - } - continue - } - // We've found an empty slot, which means we've reached the end of - // the probe sequence. 
- - // If we found a deleted slot along the way, we can - // replace it without consuming growthLeft. - if firstDeletedGroup.data != nil { - g = firstDeletedGroup - i = firstDeletedSlot - t.growthLeft++ // will be decremented below to become a no-op. - } - - // If we have no space left, first try to remove some tombstones. - if t.growthLeft == 0 { - t.pruneTombstones(typ, m) - } - - // If there is room left to grow, just insert the new entry. - if t.growthLeft > 0 { - slotKey := g.key(typ, i) - *(*string)(slotKey) = key - - slotElem = g.elem(typ, i) - - g.ctrls().set(i, ctrl(h2(hash))) - t.growthLeft-- - t.used++ - m.used++ - - t.checkInvariants(typ, m) - break outer - } - - t.rehash(typ, m) - continue outer - } - } - - if m.writing == 0 { - fatal("concurrent map writes") - } - m.writing ^= 1 - - return slotElem -} - -//go:linkname runtime_mapdelete_faststr runtime.mapdelete_faststr -func runtime_mapdelete_faststr(typ *abi.SwissMapType, m *Map, key string) { - if race.Enabled { - callerpc := sys.GetCallerPC() - pc := abi.FuncPCABIInternal(runtime_mapdelete_faststr) - race.WritePC(unsafe.Pointer(m), callerpc, pc) - } - - if m == nil || m.Used() == 0 { - return - } - - m.Delete(typ, abi.NoEscape(unsafe.Pointer(&key))) -} diff --git a/src/internal/runtime/maps/runtime_swiss.go b/src/internal/runtime/maps/runtime_swiss.go deleted file mode 100644 index 3ea018185b..0000000000 --- a/src/internal/runtime/maps/runtime_swiss.go +++ /dev/null @@ -1,352 +0,0 @@ -// Copyright 2024 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build goexperiment.swissmap - -package maps - -import ( - "internal/abi" - "internal/asan" - "internal/msan" - "internal/race" - "internal/runtime/sys" - "unsafe" -) - -// Functions below pushed from runtime. - -// Pushed from runtime in order to use runtime.plainError -// -//go:linkname errNilAssign -var errNilAssign error - -// Pull from runtime. It is important that is this the exact same copy as the -// runtime because runtime.mapaccess1_fat compares the returned pointer with -// &runtime.zeroVal[0]. -// TODO: move zeroVal to internal/abi? -// -//go:linkname zeroVal runtime.zeroVal -var zeroVal [abi.ZeroValSize]byte - -// mapaccess1 returns a pointer to h[key]. Never returns nil, instead -// it will return a reference to the zero object for the elem type if -// the key is not in the map. -// NOTE: The returned pointer may keep the whole map live, so don't -// hold onto it for very long. -// -//go:linkname runtime_mapaccess1 runtime.mapaccess1 -func runtime_mapaccess1(typ *abi.SwissMapType, m *Map, key unsafe.Pointer) unsafe.Pointer { - if race.Enabled && m != nil { - callerpc := sys.GetCallerPC() - pc := abi.FuncPCABIInternal(runtime_mapaccess1) - race.ReadPC(unsafe.Pointer(m), callerpc, pc) - race.ReadObjectPC(typ.Key, key, callerpc, pc) - } - if msan.Enabled && m != nil { - msan.Read(key, typ.Key.Size_) - } - if asan.Enabled && m != nil { - asan.Read(key, typ.Key.Size_) - } - - if m == nil || m.Used() == 0 { - if err := mapKeyError(typ, key); err != nil { - panic(err) // see issue 23734 - } - return unsafe.Pointer(&zeroVal[0]) - } - - if m.writing != 0 { - fatal("concurrent map read and map write") - } - - hash := typ.Hasher(key, m.seed) - - if m.dirLen <= 0 { - _, elem, ok := m.getWithKeySmall(typ, hash, key) - if !ok { - return unsafe.Pointer(&zeroVal[0]) - } - return elem - } - - // Select table. 
- idx := m.directoryIndex(hash) - t := m.directoryAt(idx) - - // Probe table. - seq := makeProbeSeq(h1(hash), t.groups.lengthMask) - for ; ; seq = seq.next() { - g := t.groups.group(typ, seq.offset) - - match := g.ctrls().matchH2(h2(hash)) - - for match != 0 { - i := match.first() - - slotKey := g.key(typ, i) - slotKeyOrig := slotKey - if typ.IndirectKey() { - slotKey = *((*unsafe.Pointer)(slotKey)) - } - if typ.Key.Equal(key, slotKey) { - slotElem := unsafe.Pointer(uintptr(slotKeyOrig) + typ.ElemOff) - if typ.IndirectElem() { - slotElem = *((*unsafe.Pointer)(slotElem)) - } - return slotElem - } - match = match.removeFirst() - } - - match = g.ctrls().matchEmpty() - if match != 0 { - // Finding an empty slot means we've reached the end of - // the probe sequence. - return unsafe.Pointer(&zeroVal[0]) - } - } -} - -//go:linkname runtime_mapaccess2 runtime.mapaccess2 -func runtime_mapaccess2(typ *abi.SwissMapType, m *Map, key unsafe.Pointer) (unsafe.Pointer, bool) { - if race.Enabled && m != nil { - callerpc := sys.GetCallerPC() - pc := abi.FuncPCABIInternal(runtime_mapaccess1) - race.ReadPC(unsafe.Pointer(m), callerpc, pc) - race.ReadObjectPC(typ.Key, key, callerpc, pc) - } - if msan.Enabled && m != nil { - msan.Read(key, typ.Key.Size_) - } - if asan.Enabled && m != nil { - asan.Read(key, typ.Key.Size_) - } - - if m == nil || m.Used() == 0 { - if err := mapKeyError(typ, key); err != nil { - panic(err) // see issue 23734 - } - return unsafe.Pointer(&zeroVal[0]), false - } - - if m.writing != 0 { - fatal("concurrent map read and map write") - } - - hash := typ.Hasher(key, m.seed) - - if m.dirLen == 0 { - _, elem, ok := m.getWithKeySmall(typ, hash, key) - if !ok { - return unsafe.Pointer(&zeroVal[0]), false - } - return elem, true - } - - // Select table. - idx := m.directoryIndex(hash) - t := m.directoryAt(idx) - - // Probe table. - seq := makeProbeSeq(h1(hash), t.groups.lengthMask) - for ; ; seq = seq.next() { - g := t.groups.group(typ, seq.offset) - - match := g.ctrls().matchH2(h2(hash)) - - for match != 0 { - i := match.first() - - slotKey := g.key(typ, i) - slotKeyOrig := slotKey - if typ.IndirectKey() { - slotKey = *((*unsafe.Pointer)(slotKey)) - } - if typ.Key.Equal(key, slotKey) { - slotElem := unsafe.Pointer(uintptr(slotKeyOrig) + typ.ElemOff) - if typ.IndirectElem() { - slotElem = *((*unsafe.Pointer)(slotElem)) - } - return slotElem, true - } - match = match.removeFirst() - } - - match = g.ctrls().matchEmpty() - if match != 0 { - // Finding an empty slot means we've reached the end of - // the probe sequence. - return unsafe.Pointer(&zeroVal[0]), false - } - } -} - -//go:linkname runtime_mapassign runtime.mapassign -func runtime_mapassign(typ *abi.SwissMapType, m *Map, key unsafe.Pointer) unsafe.Pointer { - if m == nil { - panic(errNilAssign) - } - if race.Enabled { - callerpc := sys.GetCallerPC() - pc := abi.FuncPCABIInternal(runtime_mapassign) - race.WritePC(unsafe.Pointer(m), callerpc, pc) - race.ReadObjectPC(typ.Key, key, callerpc, pc) - } - if msan.Enabled { - msan.Read(key, typ.Key.Size_) - } - if asan.Enabled { - asan.Read(key, typ.Key.Size_) - } - if m.writing != 0 { - fatal("concurrent map writes") - } - - hash := typ.Hasher(key, m.seed) - - // Set writing after calling Hasher, since Hasher may panic, in which - // case we have not actually done a write. 
- m.writing ^= 1 // toggle, see comment on writing - - if m.dirPtr == nil { - m.growToSmall(typ) - } - - if m.dirLen == 0 { - if m.used < abi.SwissMapGroupSlots { - elem := m.putSlotSmall(typ, hash, key) - - if m.writing == 0 { - fatal("concurrent map writes") - } - m.writing ^= 1 - - return elem - } - - // Can't fit another entry, grow to full size map. - m.growToTable(typ) - } - - var slotElem unsafe.Pointer -outer: - for { - // Select table. - idx := m.directoryIndex(hash) - t := m.directoryAt(idx) - - seq := makeProbeSeq(h1(hash), t.groups.lengthMask) - - // As we look for a match, keep track of the first deleted slot - // we find, which we'll use to insert the new entry if - // necessary. - var firstDeletedGroup groupReference - var firstDeletedSlot uintptr - - for ; ; seq = seq.next() { - g := t.groups.group(typ, seq.offset) - match := g.ctrls().matchH2(h2(hash)) - - // Look for an existing slot containing this key. - for match != 0 { - i := match.first() - - slotKey := g.key(typ, i) - slotKeyOrig := slotKey - if typ.IndirectKey() { - slotKey = *((*unsafe.Pointer)(slotKey)) - } - if typ.Key.Equal(key, slotKey) { - if typ.NeedKeyUpdate() { - typedmemmove(typ.Key, slotKey, key) - } - - slotElem = unsafe.Pointer(uintptr(slotKeyOrig) + typ.ElemOff) - if typ.IndirectElem() { - slotElem = *((*unsafe.Pointer)(slotElem)) - } - - t.checkInvariants(typ, m) - break outer - } - match = match.removeFirst() - } - - // No existing slot for this key in this group. Is this the end - // of the probe sequence? - match = g.ctrls().matchEmpty() - if match != 0 { - // Finding an empty slot means we've reached the end of - // the probe sequence. - - var i uintptr - - // If we found a deleted slot along the way, we - // can replace it without consuming growthLeft. - if firstDeletedGroup.data != nil { - g = firstDeletedGroup - i = firstDeletedSlot - t.growthLeft++ // will be decremented below to become a no-op. - } else { - // Otherwise, use the empty slot. - i = match.first() - } - - // If there is room left to grow, just insert the new entry. - if t.growthLeft > 0 { - slotKey := g.key(typ, i) - slotKeyOrig := slotKey - if typ.IndirectKey() { - kmem := newobject(typ.Key) - *(*unsafe.Pointer)(slotKey) = kmem - slotKey = kmem - } - typedmemmove(typ.Key, slotKey, key) - - slotElem = unsafe.Pointer(uintptr(slotKeyOrig) + typ.ElemOff) - if typ.IndirectElem() { - emem := newobject(typ.Elem) - *(*unsafe.Pointer)(slotElem) = emem - slotElem = emem - } - - g.ctrls().set(i, ctrl(h2(hash))) - t.growthLeft-- - t.used++ - m.used++ - - t.checkInvariants(typ, m) - break outer - } - - t.rehash(typ, m) - continue outer - } - - // No empty slots in this group. Check for a deleted - // slot, which we'll use if we don't find a match later - // in the probe sequence. - // - // We only need to remember a single deleted slot. - if firstDeletedGroup.data == nil { - // Since we already checked for empty slots - // above, matches here must be deleted slots. 
- match = g.ctrls().matchEmptyOrDeleted() - if match != 0 { - firstDeletedGroup = g - firstDeletedSlot = match.first() - } - } - } - } - - if m.writing == 0 { - fatal("concurrent map writes") - } - m.writing ^= 1 - - return slotElem -} diff --git a/src/reflect/all_test.go b/src/reflect/all_test.go index cd3e306a57..da7b2d7764 100644 --- a/src/reflect/all_test.go +++ b/src/reflect/all_test.go @@ -12,7 +12,6 @@ import ( "go/token" "internal/asan" "internal/goarch" - "internal/goexperiment" "internal/msan" "internal/race" "internal/testenv" @@ -1277,10 +1276,6 @@ var deepEqualPerfTests = []struct { } func TestDeepEqualAllocs(t *testing.T) { - // TODO(prattmic): maps on stack - if goexperiment.SwissMap { - t.Skipf("Maps on stack not yet implemented") - } if asan.Enabled { t.Skip("test allocates more with -asan; see #70079") } @@ -7343,7 +7338,8 @@ func TestGCBits(t *testing.T) { verifyGCBits(t, TypeOf(([][10000]Xscalar)(nil)), lit(1)) verifyGCBits(t, SliceOf(ArrayOf(10000, Tscalar)), lit(1)) - testGCBitsMap(t) + // For maps, we don't manually construct GC data, instead using the + // public reflect API in groupAndSlotOf. } func rep(n int, b []byte) []byte { return bytes.Repeat(b, n) } diff --git a/src/reflect/export_noswiss_test.go b/src/reflect/export_noswiss_test.go deleted file mode 100644 index 286ba6300d..0000000000 --- a/src/reflect/export_noswiss_test.go +++ /dev/null @@ -1,24 +0,0 @@ -// Copyright 2024 Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build !goexperiment.swissmap - -package reflect - -import ( - "unsafe" -) - -func MapBucketOf(x, y Type) Type { - return toType(bucketOf(x.common(), y.common())) -} - -func CachedBucketOf(m Type) Type { - t := m.(*rtype) - if Kind(t.t.Kind()) != Map { - panic("not map") - } - tt := (*mapType)(unsafe.Pointer(t)) - return toType(tt.Bucket) -} diff --git a/src/reflect/export_swiss_test.go b/src/reflect/export_swiss_test.go deleted file mode 100644 index ac3cd0adf7..0000000000 --- a/src/reflect/export_swiss_test.go +++ /dev/null @@ -1,12 +0,0 @@ -// Copyright 2024 Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build goexperiment.swissmap - -package reflect - -func MapGroupOf(x, y Type) Type { - grp, _ := groupAndSlotOf(x, y) - return grp -} diff --git a/src/reflect/export_test.go b/src/reflect/export_test.go index eedd063fcb..fc209fdfba 100644 --- a/src/reflect/export_test.go +++ b/src/reflect/export_test.go @@ -152,3 +152,8 @@ var MethodValueCallCodePtr = methodValueCallCodePtr var InternalIsZero = isZero var IsRegularMemory = isRegularMemory + +func MapGroupOf(x, y Type) Type { + grp, _ := groupAndSlotOf(x, y) + return grp +} diff --git a/src/reflect/map.go b/src/reflect/map.go new file mode 100644 index 0000000000..a9cc5b1162 --- /dev/null +++ b/src/reflect/map.go @@ -0,0 +1,449 @@ +// Copyright 2024 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package reflect + +import ( + "internal/abi" + "internal/race" + "internal/runtime/maps" + "internal/runtime/sys" + "unsafe" +) + +func (t *rtype) Key() Type { + if t.Kind() != Map { + panic("reflect: Key of non-map type " + t.String()) + } + tt := (*abi.SwissMapType)(unsafe.Pointer(t)) + return toType(tt.Key) +} + +// MapOf returns the map type with the given key and element types. 
+// For example, if k represents int and e represents string,
+// MapOf(k, e) represents map[int]string.
+//
+// If the key type is not a valid map key type (that is, if it does
+// not implement Go's == operator), MapOf panics.
+func MapOf(key, elem Type) Type {
+ ktyp := key.common()
+ etyp := elem.common()
+
+ if ktyp.Equal == nil {
+ panic("reflect.MapOf: invalid key type " + stringFor(ktyp))
+ }
+
+ // Look in cache.
+ ckey := cacheKey{Map, ktyp, etyp, 0}
+ if mt, ok := lookupCache.Load(ckey); ok {
+ return mt.(Type)
+ }
+
+ // Look in known types.
+ s := "map[" + stringFor(ktyp) + "]" + stringFor(etyp)
+ for _, tt := range typesByString(s) {
+ mt := (*abi.SwissMapType)(unsafe.Pointer(tt))
+ if mt.Key == ktyp && mt.Elem == etyp {
+ ti, _ := lookupCache.LoadOrStore(ckey, toRType(tt))
+ return ti.(Type)
+ }
+ }
+
+ group, slot := groupAndSlotOf(key, elem)
+
+ // Make a map type.
+ // Note: flag values must match those used in the TMAP case
+ // in ../cmd/compile/internal/reflectdata/reflect.go:writeType.
+ var imap any = (map[unsafe.Pointer]unsafe.Pointer)(nil)
+ mt := **(**abi.SwissMapType)(unsafe.Pointer(&imap))
+ mt.Str = resolveReflectName(newName(s, "", false, false))
+ mt.TFlag = abi.TFlagDirectIface
+ mt.Hash = fnv1(etyp.Hash, 'm', byte(ktyp.Hash>>24), byte(ktyp.Hash>>16), byte(ktyp.Hash>>8), byte(ktyp.Hash))
+ mt.Key = ktyp
+ mt.Elem = etyp
+ mt.Group = group.common()
+ mt.Hasher = func(p unsafe.Pointer, seed uintptr) uintptr {
+ return typehash(ktyp, p, seed)
+ }
+ mt.GroupSize = mt.Group.Size()
+ mt.SlotSize = slot.Size()
+ mt.ElemOff = slot.Field(1).Offset
+ mt.Flags = 0
+ if needKeyUpdate(ktyp) {
+ mt.Flags |= abi.SwissMapNeedKeyUpdate
+ }
+ if hashMightPanic(ktyp) {
+ mt.Flags |= abi.SwissMapHashMightPanic
+ }
+ if ktyp.Size_ > abi.SwissMapMaxKeyBytes {
+ mt.Flags |= abi.SwissMapIndirectKey
+ }
+ if etyp.Size_ > abi.SwissMapMaxElemBytes {
+ mt.Flags |= abi.SwissMapIndirectElem
+ }
+ mt.PtrToThis = 0
+
+ ti, _ := lookupCache.LoadOrStore(ckey, toRType(&mt.Type))
+ return ti.(Type)
+}
+
+func groupAndSlotOf(ktyp, etyp Type) (Type, Type) {
+ // type group struct {
+ // ctrl uint64
+ // slots [abi.SwissMapGroupSlots]struct {
+ // key keyType
+ // elem elemType
+ // }
+ // }
+
+ if ktyp.Size() > abi.SwissMapMaxKeyBytes {
+ ktyp = PointerTo(ktyp)
+ }
+ if etyp.Size() > abi.SwissMapMaxElemBytes {
+ etyp = PointerTo(etyp)
+ }
+
+ fields := []StructField{
+ {
+ Name: "Key",
+ Type: ktyp,
+ },
+ {
+ Name: "Elem",
+ Type: etyp,
+ },
+ }
+ slot := StructOf(fields)
+
+ fields = []StructField{
+ {
+ Name: "Ctrl",
+ Type: TypeFor[uint64](),
+ },
+ {
+ Name: "Slots",
+ Type: ArrayOf(abi.SwissMapGroupSlots, slot),
+ },
+ }
+ group := StructOf(fields)
+ return group, slot
+}
+
+var stringType = rtypeOf("")
+
+// MapIndex returns the value associated with key in the map v.
+// It panics if v's Kind is not [Map].
+// It returns the zero Value if key is not found in the map or if v represents a nil map.
+// As in Go, the key's value must be assignable to the map's key type.
+func (v Value) MapIndex(key Value) Value {
+ v.mustBe(Map)
+ tt := (*abi.SwissMapType)(unsafe.Pointer(v.typ()))
+
+ // Do not require key to be exported, so that DeepEqual
+ // and other programs can use all the keys returned by
+ // MapKeys as arguments to MapIndex. If either the map
+ // or the key is unexported, though, the result will be
+ // considered unexported. This is consistent with the
+ // behavior for structs, which allow read but not write
+ // of unexported fields.
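+ //
+ // String keys whose elements fit inline take the specialized
+ // faststr path below; other key and elem shapes go through the
+ // generic mapaccess.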
+ + var e unsafe.Pointer + if (tt.Key == stringType || key.kind() == String) && tt.Key == key.typ() && tt.Elem.Size() <= abi.SwissMapMaxElemBytes { + k := *(*string)(key.ptr) + e = mapaccess_faststr(v.typ(), v.pointer(), k) + } else { + key = key.assignTo("reflect.Value.MapIndex", tt.Key, nil) + var k unsafe.Pointer + if key.flag&flagIndir != 0 { + k = key.ptr + } else { + k = unsafe.Pointer(&key.ptr) + } + e = mapaccess(v.typ(), v.pointer(), k) + } + if e == nil { + return Value{} + } + typ := tt.Elem + fl := (v.flag | key.flag).ro() + fl |= flag(typ.Kind()) + return copyVal(typ, fl, e) +} + +// Equivalent to runtime.mapIterStart. +// +//go:noinline +func mapIterStart(t *abi.SwissMapType, m *maps.Map, it *maps.Iter) { + if race.Enabled && m != nil { + callerpc := sys.GetCallerPC() + race.ReadPC(unsafe.Pointer(m), callerpc, abi.FuncPCABIInternal(mapIterStart)) + } + + it.Init(t, m) + it.Next() +} + +// Equivalent to runtime.mapIterNext. +// +//go:noinline +func mapIterNext(it *maps.Iter) { + if race.Enabled { + callerpc := sys.GetCallerPC() + race.ReadPC(unsafe.Pointer(it.Map()), callerpc, abi.FuncPCABIInternal(mapIterNext)) + } + + it.Next() +} + +// MapKeys returns a slice containing all the keys present in the map, +// in unspecified order. +// It panics if v's Kind is not [Map]. +// It returns an empty slice if v represents a nil map. +func (v Value) MapKeys() []Value { + v.mustBe(Map) + tt := (*abi.SwissMapType)(unsafe.Pointer(v.typ())) + keyType := tt.Key + + fl := v.flag.ro() | flag(keyType.Kind()) + + // Escape analysis can't see that the map doesn't escape. It sees an + // escape from maps.IterStart, via assignment into it, even though it + // doesn't escape this function. + mptr := abi.NoEscape(v.pointer()) + m := (*maps.Map)(mptr) + mlen := int(0) + if m != nil { + mlen = maplen(mptr) + } + var it maps.Iter + mapIterStart(tt, m, &it) + a := make([]Value, mlen) + var i int + for i = 0; i < len(a); i++ { + key := it.Key() + if key == nil { + // Someone deleted an entry from the map since we + // called maplen above. It's a data race, but nothing + // we can do about it. + break + } + a[i] = copyVal(keyType, fl, key) + mapIterNext(&it) + } + return a[:i] +} + +// A MapIter is an iterator for ranging over a map. +// See [Value.MapRange]. +type MapIter struct { + m Value + hiter maps.Iter +} + +// Key returns the key of iter's current map entry. +func (iter *MapIter) Key() Value { + if !iter.hiter.Initialized() { + panic("MapIter.Key called before Next") + } + iterkey := iter.hiter.Key() + if iterkey == nil { + panic("MapIter.Key called on exhausted iterator") + } + + t := (*abi.SwissMapType)(unsafe.Pointer(iter.m.typ())) + ktype := t.Key + return copyVal(ktype, iter.m.flag.ro()|flag(ktype.Kind()), iterkey) +} + +// SetIterKey assigns to v the key of iter's current map entry. +// It is equivalent to v.Set(iter.Key()), but it avoids allocating a new Value. +// As in Go, the key must be assignable to v's type and +// must not be derived from an unexported field. +// It panics if [Value.CanSet] returns false. 
+func (v Value) SetIterKey(iter *MapIter) { + if !iter.hiter.Initialized() { + panic("reflect: Value.SetIterKey called before Next") + } + iterkey := iter.hiter.Key() + if iterkey == nil { + panic("reflect: Value.SetIterKey called on exhausted iterator") + } + + v.mustBeAssignable() + var target unsafe.Pointer + if v.kind() == Interface { + target = v.ptr + } + + t := (*abi.SwissMapType)(unsafe.Pointer(iter.m.typ())) + ktype := t.Key + + iter.m.mustBeExported() // do not let unexported m leak + key := Value{ktype, iterkey, iter.m.flag | flag(ktype.Kind()) | flagIndir} + key = key.assignTo("reflect.MapIter.SetKey", v.typ(), target) + typedmemmove(v.typ(), v.ptr, key.ptr) +} + +// Value returns the value of iter's current map entry. +func (iter *MapIter) Value() Value { + if !iter.hiter.Initialized() { + panic("MapIter.Value called before Next") + } + iterelem := iter.hiter.Elem() + if iterelem == nil { + panic("MapIter.Value called on exhausted iterator") + } + + t := (*abi.SwissMapType)(unsafe.Pointer(iter.m.typ())) + vtype := t.Elem + return copyVal(vtype, iter.m.flag.ro()|flag(vtype.Kind()), iterelem) +} + +// SetIterValue assigns to v the value of iter's current map entry. +// It is equivalent to v.Set(iter.Value()), but it avoids allocating a new Value. +// As in Go, the value must be assignable to v's type and +// must not be derived from an unexported field. +// It panics if [Value.CanSet] returns false. +func (v Value) SetIterValue(iter *MapIter) { + if !iter.hiter.Initialized() { + panic("reflect: Value.SetIterValue called before Next") + } + iterelem := iter.hiter.Elem() + if iterelem == nil { + panic("reflect: Value.SetIterValue called on exhausted iterator") + } + + v.mustBeAssignable() + var target unsafe.Pointer + if v.kind() == Interface { + target = v.ptr + } + + t := (*abi.SwissMapType)(unsafe.Pointer(iter.m.typ())) + vtype := t.Elem + + iter.m.mustBeExported() // do not let unexported m leak + elem := Value{vtype, iterelem, iter.m.flag | flag(vtype.Kind()) | flagIndir} + elem = elem.assignTo("reflect.MapIter.SetValue", v.typ(), target) + typedmemmove(v.typ(), v.ptr, elem.ptr) +} + +// Next advances the map iterator and reports whether there is another +// entry. It returns false when iter is exhausted; subsequent +// calls to [MapIter.Key], [MapIter.Value], or [MapIter.Next] will panic. +func (iter *MapIter) Next() bool { + if !iter.m.IsValid() { + panic("MapIter.Next called on an iterator that does not have an associated map Value") + } + if !iter.hiter.Initialized() { + t := (*abi.SwissMapType)(unsafe.Pointer(iter.m.typ())) + m := (*maps.Map)(iter.m.pointer()) + mapIterStart(t, m, &iter.hiter) + } else { + if iter.hiter.Key() == nil { + panic("MapIter.Next called on exhausted iterator") + } + mapIterNext(&iter.hiter) + } + return iter.hiter.Key() != nil +} + +// Reset modifies iter to iterate over v. +// It panics if v's Kind is not [Map] and v is not the zero Value. +// Reset(Value{}) causes iter to not to refer to any map, +// which may allow the previously iterated-over map to be garbage collected. +func (iter *MapIter) Reset(v Value) { + if v.IsValid() { + v.mustBe(Map) + } + iter.m = v + iter.hiter = maps.Iter{} +} + +// MapRange returns a range iterator for a map. +// It panics if v's Kind is not [Map]. +// +// Call [MapIter.Next] to advance the iterator, and [MapIter.Key]/[MapIter.Value] to access each entry. +// [MapIter.Next] returns false when the iterator is exhausted. +// MapRange follows the same iteration semantics as a range statement. 
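+// In particular, entries created during iteration may be produced or
+// skipped, and entries removed before they are reached are not produced.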
+// +// Example: +// +// iter := reflect.ValueOf(m).MapRange() +// for iter.Next() { +// k := iter.Key() +// v := iter.Value() +// ... +// } +func (v Value) MapRange() *MapIter { + // This is inlinable to take advantage of "function outlining". + // The allocation of MapIter can be stack allocated if the caller + // does not allow it to escape. + // See https://blog.filippo.io/efficient-go-apis-with-the-inliner/ + if v.kind() != Map { + v.panicNotMap() + } + return &MapIter{m: v} +} + +// SetMapIndex sets the element associated with key in the map v to elem. +// It panics if v's Kind is not [Map]. +// If elem is the zero Value, SetMapIndex deletes the key from the map. +// Otherwise if v holds a nil map, SetMapIndex will panic. +// As in Go, key's elem must be assignable to the map's key type, +// and elem's value must be assignable to the map's elem type. +func (v Value) SetMapIndex(key, elem Value) { + v.mustBe(Map) + v.mustBeExported() + key.mustBeExported() + tt := (*abi.SwissMapType)(unsafe.Pointer(v.typ())) + + if (tt.Key == stringType || key.kind() == String) && tt.Key == key.typ() && tt.Elem.Size() <= abi.SwissMapMaxElemBytes { + k := *(*string)(key.ptr) + if elem.typ() == nil { + mapdelete_faststr(v.typ(), v.pointer(), k) + return + } + elem.mustBeExported() + elem = elem.assignTo("reflect.Value.SetMapIndex", tt.Elem, nil) + var e unsafe.Pointer + if elem.flag&flagIndir != 0 { + e = elem.ptr + } else { + e = unsafe.Pointer(&elem.ptr) + } + mapassign_faststr(v.typ(), v.pointer(), k, e) + return + } + + key = key.assignTo("reflect.Value.SetMapIndex", tt.Key, nil) + var k unsafe.Pointer + if key.flag&flagIndir != 0 { + k = key.ptr + } else { + k = unsafe.Pointer(&key.ptr) + } + if elem.typ() == nil { + mapdelete(v.typ(), v.pointer(), k) + return + } + elem.mustBeExported() + elem = elem.assignTo("reflect.Value.SetMapIndex", tt.Elem, nil) + var e unsafe.Pointer + if elem.flag&flagIndir != 0 { + e = elem.ptr + } else { + e = unsafe.Pointer(&elem.ptr) + } + mapassign(v.typ(), v.pointer(), k, e) +} + +// Force slow panicking path not inlined, so it won't add to the +// inlining budget of the caller. +// TODO: undo when the inliner is no longer bottom-up only. +// +//go:noinline +func (f flag) panicNotMap() { + f.mustBe(Map) +} diff --git a/src/reflect/map_noswiss.go b/src/reflect/map_noswiss.go deleted file mode 100644 index 36f6fab76c..0000000000 --- a/src/reflect/map_noswiss.go +++ /dev/null @@ -1,484 +0,0 @@ -// Copyright 2024 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build !goexperiment.swissmap - -package reflect - -import ( - "internal/abi" - "internal/goarch" - "unsafe" -) - -// mapType represents a map type. -type mapType struct { - abi.OldMapType -} - -// Pushed from runtime. - -//go:noescape -func mapiterinit(t *abi.Type, m unsafe.Pointer, it *hiter) - -//go:noescape -func mapiternext(it *hiter) - -func (t *rtype) Key() Type { - if t.Kind() != Map { - panic("reflect: Key of non-map type " + t.String()) - } - tt := (*mapType)(unsafe.Pointer(t)) - return toType(tt.Key) -} - -// MapOf returns the map type with the given key and element types. -// For example, if k represents int and e represents string, -// MapOf(k, e) represents map[int]string. -// -// If the key type is not a valid map key type (that is, if it does -// not implement Go's == operator), MapOf panics. 
-func MapOf(key, elem Type) Type { - ktyp := key.common() - etyp := elem.common() - - if ktyp.Equal == nil { - panic("reflect.MapOf: invalid key type " + stringFor(ktyp)) - } - - // Look in cache. - ckey := cacheKey{Map, ktyp, etyp, 0} - if mt, ok := lookupCache.Load(ckey); ok { - return mt.(Type) - } - - // Look in known types. - s := "map[" + stringFor(ktyp) + "]" + stringFor(etyp) - for _, tt := range typesByString(s) { - mt := (*mapType)(unsafe.Pointer(tt)) - if mt.Key == ktyp && mt.Elem == etyp { - ti, _ := lookupCache.LoadOrStore(ckey, toRType(tt)) - return ti.(Type) - } - } - - // Make a map type. - // Note: flag values must match those used in the TMAP case - // in ../cmd/compile/internal/reflectdata/reflect.go:writeType. - var imap any = (map[unsafe.Pointer]unsafe.Pointer)(nil) - mt := **(**mapType)(unsafe.Pointer(&imap)) - mt.Str = resolveReflectName(newName(s, "", false, false)) - mt.TFlag = abi.TFlagDirectIface - mt.Hash = fnv1(etyp.Hash, 'm', byte(ktyp.Hash>>24), byte(ktyp.Hash>>16), byte(ktyp.Hash>>8), byte(ktyp.Hash)) - mt.Key = ktyp - mt.Elem = etyp - mt.Bucket = bucketOf(ktyp, etyp) - mt.Hasher = func(p unsafe.Pointer, seed uintptr) uintptr { - return typehash(ktyp, p, seed) - } - mt.Flags = 0 - if ktyp.Size_ > abi.OldMapMaxKeyBytes { - mt.KeySize = uint8(goarch.PtrSize) - mt.Flags |= 1 // indirect key - } else { - mt.KeySize = uint8(ktyp.Size_) - } - if etyp.Size_ > abi.OldMapMaxElemBytes { - mt.ValueSize = uint8(goarch.PtrSize) - mt.Flags |= 2 // indirect value - } else { - mt.ValueSize = uint8(etyp.Size_) - } - mt.BucketSize = uint16(mt.Bucket.Size_) - if isReflexive(ktyp) { - mt.Flags |= 4 - } - if needKeyUpdate(ktyp) { - mt.Flags |= 8 - } - if hashMightPanic(ktyp) { - mt.Flags |= 16 - } - mt.PtrToThis = 0 - - ti, _ := lookupCache.LoadOrStore(ckey, toRType(&mt.Type)) - return ti.(Type) -} - -func bucketOf(ktyp, etyp *abi.Type) *abi.Type { - if ktyp.Size_ > abi.OldMapMaxKeyBytes { - ktyp = ptrTo(ktyp) - } - if etyp.Size_ > abi.OldMapMaxElemBytes { - etyp = ptrTo(etyp) - } - - // Prepare GC data if any. - // A bucket is at most bucketSize*(1+maxKeySize+maxValSize)+ptrSize bytes, - // or 2064 bytes, or 258 pointer-size words, or 33 bytes of pointer bitmap. - // Note that since the key and value are known to be <= 128 bytes, - // they're guaranteed to have bitmaps instead of GC programs. - var gcdata *byte - var ptrdata uintptr - - size := abi.OldMapBucketCount*(1+ktyp.Size_+etyp.Size_) + goarch.PtrSize - if size&uintptr(ktyp.Align_-1) != 0 || size&uintptr(etyp.Align_-1) != 0 { - panic("reflect: bad size computation in MapOf") - } - - if ktyp.Pointers() || etyp.Pointers() { - nptr := (abi.OldMapBucketCount*(1+ktyp.Size_+etyp.Size_) + goarch.PtrSize) / goarch.PtrSize - n := (nptr + 7) / 8 - - // Runtime needs pointer masks to be a multiple of uintptr in size. 
- n = (n + goarch.PtrSize - 1) &^ (goarch.PtrSize - 1) - mask := make([]byte, n) - base := uintptr(abi.OldMapBucketCount / goarch.PtrSize) - - if ktyp.Pointers() { - emitGCMask(mask, base, ktyp, abi.OldMapBucketCount) - } - base += abi.OldMapBucketCount * ktyp.Size_ / goarch.PtrSize - - if etyp.Pointers() { - emitGCMask(mask, base, etyp, abi.OldMapBucketCount) - } - base += abi.OldMapBucketCount * etyp.Size_ / goarch.PtrSize - - word := base - mask[word/8] |= 1 << (word % 8) - gcdata = &mask[0] - ptrdata = (word + 1) * goarch.PtrSize - - // overflow word must be last - if ptrdata != size { - panic("reflect: bad layout computation in MapOf") - } - } - - b := &abi.Type{ - Align_: goarch.PtrSize, - Size_: size, - Kind_: abi.Struct, - PtrBytes: ptrdata, - GCData: gcdata, - } - s := "bucket(" + stringFor(ktyp) + "," + stringFor(etyp) + ")" - b.Str = resolveReflectName(newName(s, "", false, false)) - return b -} - -var stringType = rtypeOf("") - -// MapIndex returns the value associated with key in the map v. -// It panics if v's Kind is not [Map]. -// It returns the zero Value if key is not found in the map or if v represents a nil map. -// As in Go, the key's value must be assignable to the map's key type. -func (v Value) MapIndex(key Value) Value { - v.mustBe(Map) - tt := (*mapType)(unsafe.Pointer(v.typ())) - - // Do not require key to be exported, so that DeepEqual - // and other programs can use all the keys returned by - // MapKeys as arguments to MapIndex. If either the map - // or the key is unexported, though, the result will be - // considered unexported. This is consistent with the - // behavior for structs, which allow read but not write - // of unexported fields. - - var e unsafe.Pointer - if (tt.Key == stringType || key.kind() == String) && tt.Key == key.typ() && tt.Elem.Size() <= abi.OldMapMaxElemBytes { - k := *(*string)(key.ptr) - e = mapaccess_faststr(v.typ(), v.pointer(), k) - } else { - key = key.assignTo("reflect.Value.MapIndex", tt.Key, nil) - var k unsafe.Pointer - if key.flag&flagIndir != 0 { - k = key.ptr - } else { - k = unsafe.Pointer(&key.ptr) - } - e = mapaccess(v.typ(), v.pointer(), k) - } - if e == nil { - return Value{} - } - typ := tt.Elem - fl := (v.flag | key.flag).ro() - fl |= flag(typ.Kind()) - return copyVal(typ, fl, e) -} - -// MapKeys returns a slice containing all the keys present in the map, -// in unspecified order. -// It panics if v's Kind is not [Map]. -// It returns an empty slice if v represents a nil map. -func (v Value) MapKeys() []Value { - v.mustBe(Map) - tt := (*mapType)(unsafe.Pointer(v.typ())) - keyType := tt.Key - - fl := v.flag.ro() | flag(keyType.Kind()) - - m := v.pointer() - mlen := int(0) - if m != nil { - mlen = maplen(m) - } - var it hiter - mapiterinit(v.typ(), m, &it) - a := make([]Value, mlen) - var i int - for i = 0; i < len(a); i++ { - key := it.key - if key == nil { - // Someone deleted an entry from the map since we - // called maplen above. It's a data race, but nothing - // we can do about it. - break - } - a[i] = copyVal(keyType, fl, key) - mapiternext(&it) - } - return a[:i] -} - -// hiter's structure matches runtime.hiter's structure. -// Having a clone here allows us to embed a map iterator -// inside type MapIter so that MapIters can be re-used -// without doing any allocations. 
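The re-use that the embedded iterator clone enables shows up in the public API through [MapIter.Reset]; a short sketch, assuming nothing beyond the exported reflect API:

	package main

	import (
		"fmt"
		"reflect"
	)

	func main() {
		ms := []map[string]int{{"a": 1}, {"b": 2}}

		// A single MapIter, and the iterator state embedded in it, is
		// recycled across all the maps instead of allocating per map.
		var iter reflect.MapIter
		for _, m := range ms {
			iter.Reset(reflect.ValueOf(m))
			for iter.Next() {
				fmt.Println(iter.Key(), iter.Value())
			}
		}
		iter.Reset(reflect.Value{}) // drop the last map so it can be collected
	}

The deleted hiter mirror that made this possible under the old map implementation follows.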
-type hiter struct { - key unsafe.Pointer - elem unsafe.Pointer - t unsafe.Pointer - h unsafe.Pointer - buckets unsafe.Pointer - bptr unsafe.Pointer - overflow *[]unsafe.Pointer - oldoverflow *[]unsafe.Pointer - startBucket uintptr - offset uint8 - wrapped bool - B uint8 - i uint8 - bucket uintptr - checkBucket uintptr - clearSeq uint64 -} - -func (h *hiter) initialized() bool { - return h.t != nil -} - -// A MapIter is an iterator for ranging over a map. -// See [Value.MapRange]. -type MapIter struct { - m Value - hiter hiter -} - -// Key returns the key of iter's current map entry. -func (iter *MapIter) Key() Value { - if !iter.hiter.initialized() { - panic("MapIter.Key called before Next") - } - iterkey := iter.hiter.key - if iterkey == nil { - panic("MapIter.Key called on exhausted iterator") - } - - t := (*mapType)(unsafe.Pointer(iter.m.typ())) - ktype := t.Key - return copyVal(ktype, iter.m.flag.ro()|flag(ktype.Kind()), iterkey) -} - -// SetIterKey assigns to v the key of iter's current map entry. -// It is equivalent to v.Set(iter.Key()), but it avoids allocating a new Value. -// As in Go, the key must be assignable to v's type and -// must not be derived from an unexported field. -// It panics if [Value.CanSet] returns false. -func (v Value) SetIterKey(iter *MapIter) { - if !iter.hiter.initialized() { - panic("reflect: Value.SetIterKey called before Next") - } - iterkey := iter.hiter.key - if iterkey == nil { - panic("reflect: Value.SetIterKey called on exhausted iterator") - } - - v.mustBeAssignable() - var target unsafe.Pointer - if v.kind() == Interface { - target = v.ptr - } - - t := (*mapType)(unsafe.Pointer(iter.m.typ())) - ktype := t.Key - - iter.m.mustBeExported() // do not let unexported m leak - key := Value{ktype, iterkey, iter.m.flag | flag(ktype.Kind()) | flagIndir} - key = key.assignTo("reflect.MapIter.SetKey", v.typ(), target) - typedmemmove(v.typ(), v.ptr, key.ptr) -} - -// Value returns the value of iter's current map entry. -func (iter *MapIter) Value() Value { - if !iter.hiter.initialized() { - panic("MapIter.Value called before Next") - } - iterelem := iter.hiter.elem - if iterelem == nil { - panic("MapIter.Value called on exhausted iterator") - } - - t := (*mapType)(unsafe.Pointer(iter.m.typ())) - vtype := t.Elem - return copyVal(vtype, iter.m.flag.ro()|flag(vtype.Kind()), iterelem) -} - -// SetIterValue assigns to v the value of iter's current map entry. -// It is equivalent to v.Set(iter.Value()), but it avoids allocating a new Value. -// As in Go, the value must be assignable to v's type and -// must not be derived from an unexported field. -// It panics if [Value.CanSet] returns false. -func (v Value) SetIterValue(iter *MapIter) { - if !iter.hiter.initialized() { - panic("reflect: Value.SetIterValue called before Next") - } - iterelem := iter.hiter.elem - if iterelem == nil { - panic("reflect: Value.SetIterValue called on exhausted iterator") - } - - v.mustBeAssignable() - var target unsafe.Pointer - if v.kind() == Interface { - target = v.ptr - } - - t := (*mapType)(unsafe.Pointer(iter.m.typ())) - vtype := t.Elem - - iter.m.mustBeExported() // do not let unexported m leak - elem := Value{vtype, iterelem, iter.m.flag | flag(vtype.Kind()) | flagIndir} - elem = elem.assignTo("reflect.MapIter.SetValue", v.typ(), target) - typedmemmove(v.typ(), v.ptr, elem.ptr) -} - -// Next advances the map iterator and reports whether there is another -// entry. 
It returns false when iter is exhausted; subsequent -// calls to [MapIter.Key], [MapIter.Value], or [MapIter.Next] will panic. -func (iter *MapIter) Next() bool { - if !iter.m.IsValid() { - panic("MapIter.Next called on an iterator that does not have an associated map Value") - } - if !iter.hiter.initialized() { - mapiterinit(iter.m.typ(), iter.m.pointer(), &iter.hiter) - } else { - if iter.hiter.key == nil { - panic("MapIter.Next called on exhausted iterator") - } - mapiternext(&iter.hiter) - } - return iter.hiter.key != nil -} - -// Reset modifies iter to iterate over v. -// It panics if v's Kind is not [Map] and v is not the zero Value. -// Reset(Value{}) causes iter to not to refer to any map, -// which may allow the previously iterated-over map to be garbage collected. -func (iter *MapIter) Reset(v Value) { - if v.IsValid() { - v.mustBe(Map) - } - iter.m = v - iter.hiter = hiter{} -} - -// MapRange returns a range iterator for a map. -// It panics if v's Kind is not [Map]. -// -// Call [MapIter.Next] to advance the iterator, and [MapIter.Key]/[MapIter.Value] to access each entry. -// [MapIter.Next] returns false when the iterator is exhausted. -// MapRange follows the same iteration semantics as a range statement. -// -// Example: -// -// iter := reflect.ValueOf(m).MapRange() -// for iter.Next() { -// k := iter.Key() -// v := iter.Value() -// ... -// } -func (v Value) MapRange() *MapIter { - // This is inlinable to take advantage of "function outlining". - // The allocation of MapIter can be stack allocated if the caller - // does not allow it to escape. - // See https://blog.filippo.io/efficient-go-apis-with-the-inliner/ - if v.kind() != Map { - v.panicNotMap() - } - return &MapIter{m: v} -} - -// SetMapIndex sets the element associated with key in the map v to elem. -// It panics if v's Kind is not [Map]. -// If elem is the zero Value, SetMapIndex deletes the key from the map. -// Otherwise if v holds a nil map, SetMapIndex will panic. -// As in Go, key's elem must be assignable to the map's key type, -// and elem's value must be assignable to the map's elem type. -func (v Value) SetMapIndex(key, elem Value) { - v.mustBe(Map) - v.mustBeExported() - key.mustBeExported() - tt := (*mapType)(unsafe.Pointer(v.typ())) - - if (tt.Key == stringType || key.kind() == String) && tt.Key == key.typ() && tt.Elem.Size() <= abi.OldMapMaxElemBytes { - k := *(*string)(key.ptr) - if elem.typ() == nil { - mapdelete_faststr(v.typ(), v.pointer(), k) - return - } - elem.mustBeExported() - elem = elem.assignTo("reflect.Value.SetMapIndex", tt.Elem, nil) - var e unsafe.Pointer - if elem.flag&flagIndir != 0 { - e = elem.ptr - } else { - e = unsafe.Pointer(&elem.ptr) - } - mapassign_faststr(v.typ(), v.pointer(), k, e) - return - } - - key = key.assignTo("reflect.Value.SetMapIndex", tt.Key, nil) - var k unsafe.Pointer - if key.flag&flagIndir != 0 { - k = key.ptr - } else { - k = unsafe.Pointer(&key.ptr) - } - if elem.typ() == nil { - mapdelete(v.typ(), v.pointer(), k) - return - } - elem.mustBeExported() - elem = elem.assignTo("reflect.Value.SetMapIndex", tt.Elem, nil) - var e unsafe.Pointer - if elem.flag&flagIndir != 0 { - e = elem.ptr - } else { - e = unsafe.Pointer(&elem.ptr) - } - mapassign(v.typ(), v.pointer(), k, e) -} - -// Force slow panicking path not inlined, so it won't add to the -// inlining budget of the caller. -// TODO: undo when the inliner is no longer bottom-up only. 
-// -//go:noinline -func (f flag) panicNotMap() { - f.mustBe(Map) -} diff --git a/src/reflect/map_noswiss_test.go b/src/reflect/map_noswiss_test.go deleted file mode 100644 index 52fcf89535..0000000000 --- a/src/reflect/map_noswiss_test.go +++ /dev/null @@ -1,60 +0,0 @@ -// Copyright 2024 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build !goexperiment.swissmap - -package reflect_test - -import ( - "internal/abi" - "internal/goarch" - . "reflect" - "testing" -) - -func testGCBitsMap(t *testing.T) { - const bucketCount = abi.OldMapBucketCount - - hdr := make([]byte, bucketCount/goarch.PtrSize) - - verifyMapBucket := func(t *testing.T, k, e Type, m any, want []byte) { - verifyGCBits(t, MapBucketOf(k, e), want) - verifyGCBits(t, CachedBucketOf(TypeOf(m)), want) - } - verifyMapBucket(t, - Tscalar, Tptr, - map[Xscalar]Xptr(nil), - join(hdr, rep(bucketCount, lit(0)), rep(bucketCount, lit(1)), lit(1))) - verifyMapBucket(t, - Tscalarptr, Tptr, - map[Xscalarptr]Xptr(nil), - join(hdr, rep(bucketCount, lit(0, 1)), rep(bucketCount, lit(1)), lit(1))) - verifyMapBucket(t, Tint64, Tptr, - map[int64]Xptr(nil), - join(hdr, rep(bucketCount, rep(8/goarch.PtrSize, lit(0))), rep(bucketCount, lit(1)), lit(1))) - verifyMapBucket(t, - Tscalar, Tscalar, - map[Xscalar]Xscalar(nil), - empty) - verifyMapBucket(t, - ArrayOf(2, Tscalarptr), ArrayOf(3, Tptrscalar), - map[[2]Xscalarptr][3]Xptrscalar(nil), - join(hdr, rep(bucketCount*2, lit(0, 1)), rep(bucketCount*3, lit(1, 0)), lit(1))) - verifyMapBucket(t, - ArrayOf(64/goarch.PtrSize, Tscalarptr), ArrayOf(64/goarch.PtrSize, Tptrscalar), - map[[64 / goarch.PtrSize]Xscalarptr][64 / goarch.PtrSize]Xptrscalar(nil), - join(hdr, rep(bucketCount*64/goarch.PtrSize, lit(0, 1)), rep(bucketCount*64/goarch.PtrSize, lit(1, 0)), lit(1))) - verifyMapBucket(t, - ArrayOf(64/goarch.PtrSize+1, Tscalarptr), ArrayOf(64/goarch.PtrSize, Tptrscalar), - map[[64/goarch.PtrSize + 1]Xscalarptr][64 / goarch.PtrSize]Xptrscalar(nil), - join(hdr, rep(bucketCount, lit(1)), rep(bucketCount*64/goarch.PtrSize, lit(1, 0)), lit(1))) - verifyMapBucket(t, - ArrayOf(64/goarch.PtrSize, Tscalarptr), ArrayOf(64/goarch.PtrSize+1, Tptrscalar), - map[[64 / goarch.PtrSize]Xscalarptr][64/goarch.PtrSize + 1]Xptrscalar(nil), - join(hdr, rep(bucketCount*64/goarch.PtrSize, lit(0, 1)), rep(bucketCount, lit(1)), lit(1))) - verifyMapBucket(t, - ArrayOf(64/goarch.PtrSize+1, Tscalarptr), ArrayOf(64/goarch.PtrSize+1, Tptrscalar), - map[[64/goarch.PtrSize + 1]Xscalarptr][64/goarch.PtrSize + 1]Xptrscalar(nil), - join(hdr, rep(bucketCount, lit(1)), rep(bucketCount, lit(1)), lit(1))) -} diff --git a/src/reflect/map_swiss.go b/src/reflect/map_swiss.go deleted file mode 100644 index 2f31d6689f..0000000000 --- a/src/reflect/map_swiss.go +++ /dev/null @@ -1,460 +0,0 @@ -// Copyright 2024 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build goexperiment.swissmap - -package reflect - -import ( - "internal/abi" - "internal/race" - "internal/runtime/maps" - "internal/runtime/sys" - "unsafe" -) - -// mapType represents a map type. -// -// TODO(prattmic): Only used within this file, could be cleaned up. 
-type mapType = abi.SwissMapType - -func (t *rtype) Key() Type { - if t.Kind() != Map { - panic("reflect: Key of non-map type " + t.String()) - } - tt := (*mapType)(unsafe.Pointer(t)) - return toType(tt.Key) -} - -// MapOf returns the map type with the given key and element types. -// For example, if k represents int and e represents string, -// MapOf(k, e) represents map[int]string. -// -// If the key type is not a valid map key type (that is, if it does -// not implement Go's == operator), MapOf panics. -func MapOf(key, elem Type) Type { - ktyp := key.common() - etyp := elem.common() - - if ktyp.Equal == nil { - panic("reflect.MapOf: invalid key type " + stringFor(ktyp)) - } - - // Look in cache. - ckey := cacheKey{Map, ktyp, etyp, 0} - if mt, ok := lookupCache.Load(ckey); ok { - return mt.(Type) - } - - // Look in known types. - s := "map[" + stringFor(ktyp) + "]" + stringFor(etyp) - for _, tt := range typesByString(s) { - mt := (*mapType)(unsafe.Pointer(tt)) - if mt.Key == ktyp && mt.Elem == etyp { - ti, _ := lookupCache.LoadOrStore(ckey, toRType(tt)) - return ti.(Type) - } - } - - group, slot := groupAndSlotOf(key, elem) - - // Make a map type. - // Note: flag values must match those used in the TMAP case - // in ../cmd/compile/internal/reflectdata/reflect.go:writeType. - var imap any = (map[unsafe.Pointer]unsafe.Pointer)(nil) - mt := **(**mapType)(unsafe.Pointer(&imap)) - mt.Str = resolveReflectName(newName(s, "", false, false)) - mt.TFlag = abi.TFlagDirectIface - mt.Hash = fnv1(etyp.Hash, 'm', byte(ktyp.Hash>>24), byte(ktyp.Hash>>16), byte(ktyp.Hash>>8), byte(ktyp.Hash)) - mt.Key = ktyp - mt.Elem = etyp - mt.Group = group.common() - mt.Hasher = func(p unsafe.Pointer, seed uintptr) uintptr { - return typehash(ktyp, p, seed) - } - mt.GroupSize = mt.Group.Size() - mt.SlotSize = slot.Size() - mt.ElemOff = slot.Field(1).Offset - mt.Flags = 0 - if needKeyUpdate(ktyp) { - mt.Flags |= abi.SwissMapNeedKeyUpdate - } - if hashMightPanic(ktyp) { - mt.Flags |= abi.SwissMapHashMightPanic - } - if ktyp.Size_ > abi.SwissMapMaxKeyBytes { - mt.Flags |= abi.SwissMapIndirectKey - } - if etyp.Size_ > abi.SwissMapMaxKeyBytes { - mt.Flags |= abi.SwissMapIndirectElem - } - mt.PtrToThis = 0 - - ti, _ := lookupCache.LoadOrStore(ckey, toRType(&mt.Type)) - return ti.(Type) -} - -func groupAndSlotOf(ktyp, etyp Type) (Type, Type) { - // type group struct { - // ctrl uint64 - // slots [abi.SwissMapGroupSlots]struct { - // key keyType - // elem elemType - // } - // } - - if ktyp.Size() > abi.SwissMapMaxKeyBytes { - ktyp = PointerTo(ktyp) - } - if etyp.Size() > abi.SwissMapMaxElemBytes { - etyp = PointerTo(etyp) - } - - fields := []StructField{ - { - Name: "Key", - Type: ktyp, - }, - { - Name: "Elem", - Type: etyp, - }, - } - slot := StructOf(fields) - - fields = []StructField{ - { - Name: "Ctrl", - Type: TypeFor[uint64](), - }, - { - Name: "Slots", - Type: ArrayOf(abi.SwissMapGroupSlots, slot), - }, - } - group := StructOf(fields) - return group, slot -} - -var stringType = rtypeOf("") - -// MapIndex returns the value associated with key in the map v. -// It panics if v's Kind is not [Map]. -// It returns the zero Value if key is not found in the map or if v represents a nil map. -// As in Go, the key's value must be assignable to the map's key type. -func (v Value) MapIndex(key Value) Value { - v.mustBe(Map) - tt := (*mapType)(unsafe.Pointer(v.typ())) - - // Do not require key to be exported, so that DeepEqual - // and other programs can use all the keys returned by - // MapKeys as arguments to MapIndex. 
If either the map - // or the key is unexported, though, the result will be - // considered unexported. This is consistent with the - // behavior for structs, which allow read but not write - // of unexported fields. - - var e unsafe.Pointer - if (tt.Key == stringType || key.kind() == String) && tt.Key == key.typ() && tt.Elem.Size() <= abi.SwissMapMaxElemBytes { - k := *(*string)(key.ptr) - e = mapaccess_faststr(v.typ(), v.pointer(), k) - } else { - key = key.assignTo("reflect.Value.MapIndex", tt.Key, nil) - var k unsafe.Pointer - if key.flag&flagIndir != 0 { - k = key.ptr - } else { - k = unsafe.Pointer(&key.ptr) - } - e = mapaccess(v.typ(), v.pointer(), k) - } - if e == nil { - return Value{} - } - typ := tt.Elem - fl := (v.flag | key.flag).ro() - fl |= flag(typ.Kind()) - return copyVal(typ, fl, e) -} - -// Equivalent to runtime.mapIterStart. -// -//go:noinline -func mapIterStart(t *abi.SwissMapType, m *maps.Map, it *maps.Iter) { - if race.Enabled && m != nil { - callerpc := sys.GetCallerPC() - race.ReadPC(unsafe.Pointer(m), callerpc, abi.FuncPCABIInternal(mapIterStart)) - } - - it.Init(t, m) - it.Next() -} - -// Equivalent to runtime.mapIterNext. -// -//go:noinline -func mapIterNext(it *maps.Iter) { - if race.Enabled { - callerpc := sys.GetCallerPC() - race.ReadPC(unsafe.Pointer(it.Map()), callerpc, abi.FuncPCABIInternal(mapIterNext)) - } - - it.Next() -} - -// MapKeys returns a slice containing all the keys present in the map, -// in unspecified order. -// It panics if v's Kind is not [Map]. -// It returns an empty slice if v represents a nil map. -func (v Value) MapKeys() []Value { - v.mustBe(Map) - tt := (*mapType)(unsafe.Pointer(v.typ())) - keyType := tt.Key - - fl := v.flag.ro() | flag(keyType.Kind()) - - // Escape analysis can't see that the map doesn't escape. It sees an - // escape from maps.IterStart, via assignment into it, even though it - // doesn't escape this function. - mptr := abi.NoEscape(v.pointer()) - m := (*maps.Map)(mptr) - mlen := int(0) - if m != nil { - mlen = maplen(mptr) - } - var it maps.Iter - mapIterStart(tt, m, &it) - a := make([]Value, mlen) - var i int - for i = 0; i < len(a); i++ { - key := it.Key() - if key == nil { - // Someone deleted an entry from the map since we - // called maplen above. It's a data race, but nothing - // we can do about it. - break - } - a[i] = copyVal(keyType, fl, key) - mapIterNext(&it) - } - return a[:i] -} - -// A MapIter is an iterator for ranging over a map. -// See [Value.MapRange]. -type MapIter struct { - m Value - hiter maps.Iter -} - -// TODO(prattmic): only for sharing the linkname declarations with old maps. -// Remove with old maps. -type hiter = maps.Iter - -// Key returns the key of iter's current map entry. -func (iter *MapIter) Key() Value { - if !iter.hiter.Initialized() { - panic("MapIter.Key called before Next") - } - iterkey := iter.hiter.Key() - if iterkey == nil { - panic("MapIter.Key called on exhausted iterator") - } - - t := (*mapType)(unsafe.Pointer(iter.m.typ())) - ktype := t.Key - return copyVal(ktype, iter.m.flag.ro()|flag(ktype.Kind()), iterkey) -} - -// SetIterKey assigns to v the key of iter's current map entry. -// It is equivalent to v.Set(iter.Key()), but it avoids allocating a new Value. -// As in Go, the key must be assignable to v's type and -// must not be derived from an unexported field. -// It panics if [Value.CanSet] returns false. 
-func (v Value) SetIterKey(iter *MapIter) { - if !iter.hiter.Initialized() { - panic("reflect: Value.SetIterKey called before Next") - } - iterkey := iter.hiter.Key() - if iterkey == nil { - panic("reflect: Value.SetIterKey called on exhausted iterator") - } - - v.mustBeAssignable() - var target unsafe.Pointer - if v.kind() == Interface { - target = v.ptr - } - - t := (*mapType)(unsafe.Pointer(iter.m.typ())) - ktype := t.Key - - iter.m.mustBeExported() // do not let unexported m leak - key := Value{ktype, iterkey, iter.m.flag | flag(ktype.Kind()) | flagIndir} - key = key.assignTo("reflect.MapIter.SetKey", v.typ(), target) - typedmemmove(v.typ(), v.ptr, key.ptr) -} - -// Value returns the value of iter's current map entry. -func (iter *MapIter) Value() Value { - if !iter.hiter.Initialized() { - panic("MapIter.Value called before Next") - } - iterelem := iter.hiter.Elem() - if iterelem == nil { - panic("MapIter.Value called on exhausted iterator") - } - - t := (*mapType)(unsafe.Pointer(iter.m.typ())) - vtype := t.Elem - return copyVal(vtype, iter.m.flag.ro()|flag(vtype.Kind()), iterelem) -} - -// SetIterValue assigns to v the value of iter's current map entry. -// It is equivalent to v.Set(iter.Value()), but it avoids allocating a new Value. -// As in Go, the value must be assignable to v's type and -// must not be derived from an unexported field. -// It panics if [Value.CanSet] returns false. -func (v Value) SetIterValue(iter *MapIter) { - if !iter.hiter.Initialized() { - panic("reflect: Value.SetIterValue called before Next") - } - iterelem := iter.hiter.Elem() - if iterelem == nil { - panic("reflect: Value.SetIterValue called on exhausted iterator") - } - - v.mustBeAssignable() - var target unsafe.Pointer - if v.kind() == Interface { - target = v.ptr - } - - t := (*mapType)(unsafe.Pointer(iter.m.typ())) - vtype := t.Elem - - iter.m.mustBeExported() // do not let unexported m leak - elem := Value{vtype, iterelem, iter.m.flag | flag(vtype.Kind()) | flagIndir} - elem = elem.assignTo("reflect.MapIter.SetValue", v.typ(), target) - typedmemmove(v.typ(), v.ptr, elem.ptr) -} - -// Next advances the map iterator and reports whether there is another -// entry. It returns false when iter is exhausted; subsequent -// calls to [MapIter.Key], [MapIter.Value], or [MapIter.Next] will panic. -func (iter *MapIter) Next() bool { - if !iter.m.IsValid() { - panic("MapIter.Next called on an iterator that does not have an associated map Value") - } - if !iter.hiter.Initialized() { - t := (*mapType)(unsafe.Pointer(iter.m.typ())) - m := (*maps.Map)(iter.m.pointer()) - mapIterStart(t, m, &iter.hiter) - } else { - if iter.hiter.Key() == nil { - panic("MapIter.Next called on exhausted iterator") - } - mapIterNext(&iter.hiter) - } - return iter.hiter.Key() != nil -} - -// Reset modifies iter to iterate over v. -// It panics if v's Kind is not [Map] and v is not the zero Value. -// Reset(Value{}) causes iter to not to refer to any map, -// which may allow the previously iterated-over map to be garbage collected. -func (iter *MapIter) Reset(v Value) { - if v.IsValid() { - v.mustBe(Map) - } - iter.m = v - iter.hiter = maps.Iter{} -} - -// MapRange returns a range iterator for a map. -// It panics if v's Kind is not [Map]. -// -// Call [MapIter.Next] to advance the iterator, and [MapIter.Key]/[MapIter.Value] to access each entry. -// [MapIter.Next] returns false when the iterator is exhausted. -// MapRange follows the same iteration semantics as a range statement. 
-// -// Example: -// -// iter := reflect.ValueOf(m).MapRange() -// for iter.Next() { -// k := iter.Key() -// v := iter.Value() -// ... -// } -func (v Value) MapRange() *MapIter { - // This is inlinable to take advantage of "function outlining". - // The allocation of MapIter can be stack allocated if the caller - // does not allow it to escape. - // See https://blog.filippo.io/efficient-go-apis-with-the-inliner/ - if v.kind() != Map { - v.panicNotMap() - } - return &MapIter{m: v} -} - -// SetMapIndex sets the element associated with key in the map v to elem. -// It panics if v's Kind is not [Map]. -// If elem is the zero Value, SetMapIndex deletes the key from the map. -// Otherwise if v holds a nil map, SetMapIndex will panic. -// As in Go, key's elem must be assignable to the map's key type, -// and elem's value must be assignable to the map's elem type. -func (v Value) SetMapIndex(key, elem Value) { - v.mustBe(Map) - v.mustBeExported() - key.mustBeExported() - tt := (*mapType)(unsafe.Pointer(v.typ())) - - if (tt.Key == stringType || key.kind() == String) && tt.Key == key.typ() && tt.Elem.Size() <= abi.SwissMapMaxElemBytes { - k := *(*string)(key.ptr) - if elem.typ() == nil { - mapdelete_faststr(v.typ(), v.pointer(), k) - return - } - elem.mustBeExported() - elem = elem.assignTo("reflect.Value.SetMapIndex", tt.Elem, nil) - var e unsafe.Pointer - if elem.flag&flagIndir != 0 { - e = elem.ptr - } else { - e = unsafe.Pointer(&elem.ptr) - } - mapassign_faststr(v.typ(), v.pointer(), k, e) - return - } - - key = key.assignTo("reflect.Value.SetMapIndex", tt.Key, nil) - var k unsafe.Pointer - if key.flag&flagIndir != 0 { - k = key.ptr - } else { - k = unsafe.Pointer(&key.ptr) - } - if elem.typ() == nil { - mapdelete(v.typ(), v.pointer(), k) - return - } - elem.mustBeExported() - elem = elem.assignTo("reflect.Value.SetMapIndex", tt.Elem, nil) - var e unsafe.Pointer - if elem.flag&flagIndir != 0 { - e = elem.ptr - } else { - e = unsafe.Pointer(&elem.ptr) - } - mapassign(v.typ(), v.pointer(), k, e) -} - -// Force slow panicking path not inlined, so it won't add to the -// inlining budget of the caller. -// TODO: undo when the inliner is no longer bottom-up only. -// -//go:noinline -func (f flag) panicNotMap() { - f.mustBe(Map) -} diff --git a/src/reflect/map_swiss_test.go b/src/reflect/map_swiss_test.go deleted file mode 100644 index 621140aa60..0000000000 --- a/src/reflect/map_swiss_test.go +++ /dev/null @@ -1,30 +0,0 @@ -// Copyright 2024 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build goexperiment.swissmap - -package reflect_test - -import ( - "reflect" - "testing" -) - -func testGCBitsMap(t *testing.T) { - // Unlike old maps, we don't manually construct GC data for swiss maps, - // instead using the public reflect API in groupAndSlotOf. -} - -// See also runtime_test.TestGroupSizeZero. -func TestGroupSizeZero(t *testing.T) { - st := reflect.TypeFor[struct{}]() - grp := reflect.MapGroupOf(st, st) - - // internal/runtime/maps when create pointers to slots, even if slots - // are size 0. We should have reserved an extra word to ensure that - // pointers to the zero-size type at the end of group are valid. - if grp.Size() <= 8 { - t.Errorf("Group size got %d want >8", grp.Size()) - } -} diff --git a/src/reflect/map_test.go b/src/reflect/map_test.go new file mode 100644 index 0000000000..621b5fdd73 --- /dev/null +++ b/src/reflect/map_test.go @@ -0,0 +1,23 @@ +// Copyright 2024 The Go Authors. 
All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package reflect_test
+
+import (
+	"reflect"
+	"testing"
+)
+
+// See also runtime_test.TestGroupSizeZero.
+func TestGroupSizeZero(t *testing.T) {
+	st := reflect.TypeFor[struct{}]()
+	grp := reflect.MapGroupOf(st, st)
+
+	// internal/runtime/maps creates pointers to slots, even if slots
+	// are size 0. We should have reserved an extra word to ensure that
+	// pointers to the zero-size type at the end of group are valid.
+	if grp.Size() <= 8 {
+		t.Errorf("Group size got %d want >8", grp.Size())
+	}
+}
diff --git a/src/runtime/export_map_noswiss_test.go b/src/runtime/export_map_noswiss_test.go
deleted file mode 100644
index 4638afa6b8..0000000000
--- a/src/runtime/export_map_noswiss_test.go
+++ /dev/null
@@ -1,64 +0,0 @@
-// Copyright 2024 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-//go:build !goexperiment.swissmap
-
-package runtime
-
-import (
-	"internal/abi"
-	"unsafe"
-)
-
-const RuntimeHmapSize = unsafe.Sizeof(hmap{})
-
-func OverLoadFactor(count int, B uint8) bool {
-	return overLoadFactor(count, B)
-}
-
-func MapBucketsCount(m map[int]int) int {
-	h := *(**hmap)(unsafe.Pointer(&m))
-	return 1 << h.B
-}
-
-func MapBucketsPointerIsNil(m map[int]int) bool {
-	h := *(**hmap)(unsafe.Pointer(&m))
-	return h.buckets == nil
-}
-
-func MapTombstoneCheck(m map[int]int) {
-	// Make sure emptyOne and emptyRest are distributed correctly.
-	// We should have a series of filled and emptyOne cells, followed by
-	// a series of emptyRest cells.
-	h := *(**hmap)(unsafe.Pointer(&m))
-	i := any(m)
-	t := *(**maptype)(unsafe.Pointer(&i))
-
-	for x := 0; x < 1<<h.B; x++ {
-		b0 := (*bmap)(add(h.buckets, uintptr(x)*uintptr(t.BucketSize)))
-		n := 0
-		for b := b0; b != nil; b = b.overflow(t) {
-			for i := 0; i < abi.OldMapBucketCount; i++ {
-				if b.tophash[i] != emptyRest {
-					n++
-				}
-			}
-		}
-		k := 0
-		for b := b0; b != nil; b = b.overflow(t) {
-			for i := 0; i < abi.OldMapBucketCount; i++ {
-				if k < n && b.tophash[i] == emptyRest {
-					panic("early emptyRest")
-				}
-				if k >= n && b.tophash[i] != emptyRest {
-					panic("late non-emptyRest")
-				}
-				if k == n-1 && b.tophash[i] == emptyOne {
-					panic("last non-emptyRest entry is emptyOne")
-				}
-				k++
-			}
-		}
-	}
-}
diff --git a/src/runtime/export_map_swiss_test.go b/src/runtime/export_map_swiss_test.go
deleted file mode 100644
index 55a7d6ff04..0000000000
--- a/src/runtime/export_map_swiss_test.go
+++ /dev/null
@@ -1,11 +0,0 @@
-// Copyright 2024 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-//go:build goexperiment.swissmap
-
-package runtime
-
-func MapTombstoneCheck(m map[int]int) {
-	// TODO
-}
diff --git a/src/runtime/export_test.go b/src/runtime/export_test.go
index 3af1362ee2..fa30efccb1 100644
--- a/src/runtime/export_test.go
+++ b/src/runtime/export_test.go
@@ -59,9 +59,6 @@ const CrashStackImplemented = crashStackImplemented
 const TracebackInnerFrames = tracebackInnerFrames
 const TracebackOuterFrames = tracebackOuterFrames
 
-var MapKeys = keys
-var MapValues = values
-
 var LockPartialOrder = lockPartialOrder
 
 type TimeTimer = timeTimer
diff --git a/src/runtime/linkname_shim.go b/src/runtime/linkname_shim.go
new file mode 100644
index 0000000000..0ceff2b16c
--- /dev/null
+++ b/src/runtime/linkname_shim.go
@@ -0,0 +1,208 @@
+// Copyright 2025 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
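TestGroupSizeZero above guards the same property the gc compiler enforces for ordinary structs: a trailing zero-size field is padded so that its address never points one past the allocation. A hedged illustration of that compiler behavior (sizes are for 64-bit gc; the type is invented for the example):

	package main

	import (
		"fmt"
		"unsafe"
	)

	type group struct {
		ctrl  uint64
		slots [8]struct{} // zero-size "slots", like a group of struct{} keys/elems
	}

	func main() {
		// Without padding, &g.slots[7] would point one past the end of g.
		// gc pads the trailing zero-size field, so the struct is larger
		// than its 8-byte ctrl word alone, the analogue of the
		// grp.Size() > 8 check in the test.
		var g group
		fmt.Println(unsafe.Sizeof(g)) // 16 on 64-bit, not 8
	}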
+ +package runtime + +import ( + "internal/abi" + "internal/runtime/maps" + "internal/runtime/sys" + "unsafe" +) + +// Legacy //go:linkname compatibility shims +// +// The functions below are unused by the toolchain, and exist only for +// compatibility with existing //go:linkname use in the ecosystem. + +// linknameIter is the it argument to mapiterinit and mapiternext. +// +// Callers of mapiterinit allocate their own iter structure, which has the +// layout of the pre-Go 1.24 hiter structure, shown here for posterity: +// +// type hiter struct { +// key unsafe.Pointer +// elem unsafe.Pointer +// t *maptype // old map abi.Type +// h *hmap +// buckets unsafe.Pointer +// bptr *bmap +// overflow *[]*bmap +// oldoverflow *[]*bmap +// startBucket uintptr +// offset uint8 +// wrapped bool +// B uint8 +// i uint8 +// bucket uintptr +// checkBucket uintptr +// } +// +// Our structure must maintain compatibility with the old structure. This +// means: +// +// - Our structure must be the same size or smaller than hiter. Otherwise we +// may write outside the caller's hiter allocation. +// - Our structure must have the same pointer layout as hiter, so that the GC +// tracks pointers properly. +// +// Based on analysis of the "hall of shame" users of these linknames: +// +// - The key and elem fields must be kept up to date with the current key/elem. +// Some users directly access the key and elem fields rather than calling +// reflect.mapiterkey/reflect.mapiterelem. +// - The t field must be non-nil after mapiterinit. gonum.org/v1/gonum uses +// this to verify the iterator is initialized. +// - github.com/segmentio/encoding and github.com/RomiChan/protobuf check if h +// is non-nil, but the code has no effect. Thus the value of h does not +// matter. See internal/runtime_reflect/map.go. +type linknameIter struct { + // Fields from hiter. + key unsafe.Pointer + elem unsafe.Pointer + typ *abi.SwissMapType + + // The real iterator. + it *maps.Iter +} + +// mapiterinit is a compatibility wrapper for map iterator for users of +// //go:linkname from before Go 1.24. It is not used by Go itself. New users +// should use reflect or the maps package. +// +// mapiterinit should be an internal detail, +// but widely used packages access it using linkname. +// Notable members of the hall of shame include: +// - github.com/bytedance/sonic +// - github.com/goccy/go-json +// - github.com/RomiChan/protobuf +// - github.com/segmentio/encoding +// - github.com/ugorji/go/codec +// - github.com/wI2L/jettison +// +// Do not remove or change the type signature. +// See go.dev/issue/67401. +// +//go:linkname mapiterinit +func mapiterinit(t *abi.SwissMapType, m *maps.Map, it *linknameIter) { + if raceenabled && m != nil { + callerpc := sys.GetCallerPC() + racereadpc(unsafe.Pointer(m), callerpc, abi.FuncPCABIInternal(mapiterinit)) + } + + it.typ = t + + it.it = new(maps.Iter) + it.it.Init(t, m) + it.it.Next() + + it.key = it.it.Key() + it.elem = it.it.Elem() +} + +// reflect_mapiterinit is a compatibility wrapper for map iterator for users of +// //go:linkname from before Go 1.24. It is not used by Go itself. New users +// should use reflect or the maps package. +// +// reflect_mapiterinit should be an internal detail, +// but widely used packages access it using linkname. +// Notable members of the hall of shame include: +// - github.com/modern-go/reflect2 +// - gitee.com/quant1x/gox +// - github.com/v2pro/plz +// - github.com/wI2L/jettison +// +// Do not remove or change the type signature. +// See go.dev/issue/67401. 
+// +//go:linkname reflect_mapiterinit reflect.mapiterinit +func reflect_mapiterinit(t *abi.SwissMapType, m *maps.Map, it *linknameIter) { + mapiterinit(t, m, it) +} + +// mapiternext is a compatibility wrapper for map iterator for users of +// //go:linkname from before Go 1.24. It is not used by Go itself. New users +// should use reflect or the maps package. +// +// mapiternext should be an internal detail, +// but widely used packages access it using linkname. +// Notable members of the hall of shame include: +// - github.com/bytedance/sonic +// - github.com/RomiChan/protobuf +// - github.com/segmentio/encoding +// - github.com/ugorji/go/codec +// - gonum.org/v1/gonum +// +// Do not remove or change the type signature. +// See go.dev/issue/67401. +// +//go:linkname mapiternext +func mapiternext(it *linknameIter) { + if raceenabled { + callerpc := sys.GetCallerPC() + racereadpc(unsafe.Pointer(it.it.Map()), callerpc, abi.FuncPCABIInternal(mapiternext)) + } + + it.it.Next() + + it.key = it.it.Key() + it.elem = it.it.Elem() +} + +// reflect_mapiternext is a compatibility wrapper for map iterator for users of +// //go:linkname from before Go 1.24. It is not used by Go itself. New users +// should use reflect or the maps package. +// +// reflect_mapiternext is for package reflect, +// but widely used packages access it using linkname. +// Notable members of the hall of shame include: +// - gitee.com/quant1x/gox +// - github.com/modern-go/reflect2 +// - github.com/goccy/go-json +// - github.com/v2pro/plz +// - github.com/wI2L/jettison +// +// Do not remove or change the type signature. +// See go.dev/issue/67401. +// +//go:linkname reflect_mapiternext reflect.mapiternext +func reflect_mapiternext(it *linknameIter) { + mapiternext(it) +} + +// reflect_mapiterkey is a compatibility wrapper for map iterator for users of +// //go:linkname from before Go 1.24. It is not used by Go itself. New users +// should use reflect or the maps package. +// +// reflect_mapiterkey should be an internal detail, +// but widely used packages access it using linkname. +// Notable members of the hall of shame include: +// - github.com/goccy/go-json +// - gonum.org/v1/gonum +// +// Do not remove or change the type signature. +// See go.dev/issue/67401. +// +//go:linkname reflect_mapiterkey reflect.mapiterkey +func reflect_mapiterkey(it *linknameIter) unsafe.Pointer { + return it.it.Key() +} + +// reflect_mapiterelem is a compatibility wrapper for map iterator for users of +// //go:linkname from before Go 1.24. It is not used by Go itself. New users +// should use reflect or the maps package. +// +// reflect_mapiterelem should be an internal detail, +// but widely used packages access it using linkname. +// Notable members of the hall of shame include: +// - github.com/goccy/go-json +// - gonum.org/v1/gonum +// +// Do not remove or change the type signature. +// See go.dev/issue/67401. +// +//go:linkname reflect_mapiterelem reflect.mapiterelem +func reflect_mapiterelem(it *linknameIter) unsafe.Pointer { + return it.it.Elem() +} diff --git a/src/runtime/linkname_swiss.go b/src/runtime/linkname_swiss.go deleted file mode 100644 index 1be724477e..0000000000 --- a/src/runtime/linkname_swiss.go +++ /dev/null @@ -1,211 +0,0 @@ -// Copyright 2025 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -//go:build goexperiment.swissmap - -package runtime - -import ( - "internal/abi" - "internal/runtime/maps" - "internal/runtime/sys" - "unsafe" -) - -// Legacy //go:linkname compatibility shims -// -// The functions below are unused by the toolchain, and exist only for -// compatibility with existing //go:linkname use in the ecosystem (and in -// map_noswiss.go for normal use via GOEXPERIMENT=noswissmap). - -// linknameIter is the it argument to mapiterinit and mapiternext. -// -// Callers of mapiterinit allocate their own iter structure, which has the -// layout of the pre-Go 1.24 hiter structure, shown here for posterity: -// -// type hiter struct { -// key unsafe.Pointer -// elem unsafe.Pointer -// t *maptype -// h *hmap -// buckets unsafe.Pointer -// bptr *bmap -// overflow *[]*bmap -// oldoverflow *[]*bmap -// startBucket uintptr -// offset uint8 -// wrapped bool -// B uint8 -// i uint8 -// bucket uintptr -// checkBucket uintptr -// } -// -// Our structure must maintain compatibility with the old structure. This -// means: -// -// - Our structure must be the same size or smaller than hiter. Otherwise we -// may write outside the caller's hiter allocation. -// - Our structure must have the same pointer layout as hiter, so that the GC -// tracks pointers properly. -// -// Based on analysis of the "hall of shame" users of these linknames: -// -// - The key and elem fields must be kept up to date with the current key/elem. -// Some users directly access the key and elem fields rather than calling -// reflect.mapiterkey/reflect.mapiterelem. -// - The t field must be non-nil after mapiterinit. gonum.org/v1/gonum uses -// this to verify the iterator is initialized. -// - github.com/segmentio/encoding and github.com/RomiChan/protobuf check if h -// is non-nil, but the code has no effect. Thus the value of h does not -// matter. See internal/runtime_reflect/map.go. -type linknameIter struct { - // Fields from hiter. - key unsafe.Pointer - elem unsafe.Pointer - typ *abi.SwissMapType - - // The real iterator. - it *maps.Iter -} - -// mapiterinit is a compatibility wrapper for map iterator for users of -// //go:linkname from before Go 1.24. It is not used by Go itself. New users -// should use reflect or the maps package. -// -// mapiterinit should be an internal detail, -// but widely used packages access it using linkname. -// Notable members of the hall of shame include: -// - github.com/bytedance/sonic -// - github.com/goccy/go-json -// - github.com/RomiChan/protobuf -// - github.com/segmentio/encoding -// - github.com/ugorji/go/codec -// - github.com/wI2L/jettison -// -// Do not remove or change the type signature. -// See go.dev/issue/67401. -// -//go:linkname mapiterinit -func mapiterinit(t *abi.SwissMapType, m *maps.Map, it *linknameIter) { - if raceenabled && m != nil { - callerpc := sys.GetCallerPC() - racereadpc(unsafe.Pointer(m), callerpc, abi.FuncPCABIInternal(mapiterinit)) - } - - it.typ = t - - it.it = new(maps.Iter) - it.it.Init(t, m) - it.it.Next() - - it.key = it.it.Key() - it.elem = it.it.Elem() -} - -// reflect_mapiterinit is a compatibility wrapper for map iterator for users of -// //go:linkname from before Go 1.24. It is not used by Go itself. New users -// should use reflect or the maps package. -// -// reflect_mapiterinit should be an internal detail, -// but widely used packages access it using linkname. 
-// Notable members of the hall of shame include: -// - github.com/modern-go/reflect2 -// - gitee.com/quant1x/gox -// - github.com/v2pro/plz -// - github.com/wI2L/jettison -// -// Do not remove or change the type signature. -// See go.dev/issue/67401. -// -//go:linkname reflect_mapiterinit reflect.mapiterinit -func reflect_mapiterinit(t *abi.SwissMapType, m *maps.Map, it *linknameIter) { - mapiterinit(t, m, it) -} - -// mapiternext is a compatibility wrapper for map iterator for users of -// //go:linkname from before Go 1.24. It is not used by Go itself. New users -// should use reflect or the maps package. -// -// mapiternext should be an internal detail, -// but widely used packages access it using linkname. -// Notable members of the hall of shame include: -// - github.com/bytedance/sonic -// - github.com/RomiChan/protobuf -// - github.com/segmentio/encoding -// - github.com/ugorji/go/codec -// - gonum.org/v1/gonum -// -// Do not remove or change the type signature. -// See go.dev/issue/67401. -// -//go:linkname mapiternext -func mapiternext(it *linknameIter) { - if raceenabled { - callerpc := sys.GetCallerPC() - racereadpc(unsafe.Pointer(it.it.Map()), callerpc, abi.FuncPCABIInternal(mapiternext)) - } - - it.it.Next() - - it.key = it.it.Key() - it.elem = it.it.Elem() -} - -// reflect_mapiternext is a compatibility wrapper for map iterator for users of -// //go:linkname from before Go 1.24. It is not used by Go itself. New users -// should use reflect or the maps package. -// -// reflect_mapiternext is for package reflect, -// but widely used packages access it using linkname. -// Notable members of the hall of shame include: -// - gitee.com/quant1x/gox -// - github.com/modern-go/reflect2 -// - github.com/goccy/go-json -// - github.com/v2pro/plz -// - github.com/wI2L/jettison -// -// Do not remove or change the type signature. -// See go.dev/issue/67401. -// -//go:linkname reflect_mapiternext reflect.mapiternext -func reflect_mapiternext(it *linknameIter) { - mapiternext(it) -} - -// reflect_mapiterkey is a compatibility wrapper for map iterator for users of -// //go:linkname from before Go 1.24. It is not used by Go itself. New users -// should use reflect or the maps package. -// -// reflect_mapiterkey should be an internal detail, -// but widely used packages access it using linkname. -// Notable members of the hall of shame include: -// - github.com/goccy/go-json -// - gonum.org/v1/gonum -// -// Do not remove or change the type signature. -// See go.dev/issue/67401. -// -//go:linkname reflect_mapiterkey reflect.mapiterkey -func reflect_mapiterkey(it *linknameIter) unsafe.Pointer { - return it.it.Key() -} - -// reflect_mapiterelem is a compatibility wrapper for map iterator for users of -// //go:linkname from before Go 1.24. It is not used by Go itself. New users -// should use reflect or the maps package. -// -// reflect_mapiterelem should be an internal detail, -// but widely used packages access it using linkname. -// Notable members of the hall of shame include: -// - github.com/goccy/go-json -// - gonum.org/v1/gonum -// -// Do not remove or change the type signature. -// See go.dev/issue/67401. -// -//go:linkname reflect_mapiterelem reflect.mapiterelem -func reflect_mapiterelem(it *linknameIter) unsafe.Pointer { - return it.it.Elem() -} diff --git a/src/runtime/map.go b/src/runtime/map.go new file mode 100644 index 0000000000..facf86e494 --- /dev/null +++ b/src/runtime/map.go @@ -0,0 +1,329 @@ +// Copyright 2014 The Go Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package runtime + +import ( + "internal/abi" + "internal/runtime/maps" + "internal/runtime/sys" + "unsafe" +) + +const ( + // TODO: remove? These are used by tests but not the actual map + loadFactorNum = 7 + loadFactorDen = 8 +) + +//go:linkname maps_errNilAssign internal/runtime/maps.errNilAssign +var maps_errNilAssign error = plainError("assignment to entry in nil map") + +func makemap64(t *abi.SwissMapType, hint int64, m *maps.Map) *maps.Map { + if int64(int(hint)) != hint { + hint = 0 + } + return makemap(t, int(hint), m) +} + +// makemap_small implements Go map creation for make(map[k]v) and +// make(map[k]v, hint) when hint is known to be at most abi.SwissMapGroupSlots +// at compile time and the map needs to be allocated on the heap. +// +// makemap_small should be an internal detail, +// but widely used packages access it using linkname. +// Notable members of the hall of shame include: +// - github.com/bytedance/sonic +// +// Do not remove or change the type signature. +// See go.dev/issue/67401. +// +//go:linkname makemap_small +func makemap_small() *maps.Map { + return maps.NewEmptyMap() +} + +// makemap implements Go map creation for make(map[k]v, hint). +// If the compiler has determined that the map or the first group +// can be created on the stack, m and optionally m.dirPtr may be non-nil. +// If m != nil, the map can be created directly in m. +// If m.dirPtr != nil, it points to a group usable for a small map. +// +// makemap should be an internal detail, +// but widely used packages access it using linkname. +// Notable members of the hall of shame include: +// - github.com/ugorji/go/codec +// +// Do not remove or change the type signature. +// See go.dev/issue/67401. +// +//go:linkname makemap +func makemap(t *abi.SwissMapType, hint int, m *maps.Map) *maps.Map { + if hint < 0 { + hint = 0 + } + + return maps.NewMap(t, uintptr(hint), m, maxAlloc) +} + +// mapaccess1 returns a pointer to h[key]. Never returns nil, instead +// it will return a reference to the zero object for the elem type if +// the key is not in the map. +// NOTE: The returned pointer may keep the whole map live, so don't +// hold onto it for very long. +// +// mapaccess1 is pushed from internal/runtime/maps. We could just call it, but +// we want to avoid one layer of call. +// +//go:linkname mapaccess1 +func mapaccess1(t *abi.SwissMapType, m *maps.Map, key unsafe.Pointer) unsafe.Pointer + +// mapaccess2 should be an internal detail, +// but widely used packages access it using linkname. +// Notable members of the hall of shame include: +// - github.com/ugorji/go/codec +// +// Do not remove or change the type signature. +// See go.dev/issue/67401. +// +//go:linkname mapaccess2 +func mapaccess2(t *abi.SwissMapType, m *maps.Map, key unsafe.Pointer) (unsafe.Pointer, bool) + +func mapaccess1_fat(t *abi.SwissMapType, m *maps.Map, key, zero unsafe.Pointer) unsafe.Pointer { + e := mapaccess1(t, m, key) + if e == unsafe.Pointer(&zeroVal[0]) { + return zero + } + return e +} + +func mapaccess2_fat(t *abi.SwissMapType, m *maps.Map, key, zero unsafe.Pointer) (unsafe.Pointer, bool) { + e := mapaccess1(t, m, key) + if e == unsafe.Pointer(&zeroVal[0]) { + return zero, false + } + return e, true +} + +// mapassign is pushed from internal/runtime/maps. We could just call it, but +// we want to avoid one layer of call. 
+//
+// mapassign should be an internal detail,
+// but widely used packages access it using linkname.
+// Notable members of the hall of shame include:
+//   - github.com/bytedance/sonic
+//   - github.com/RomiChan/protobuf
+//   - github.com/segmentio/encoding
+//   - github.com/ugorji/go/codec
+//
+// Do not remove or change the type signature.
+// See go.dev/issue/67401.
+//
+//go:linkname mapassign
+func mapassign(t *abi.SwissMapType, m *maps.Map, key unsafe.Pointer) unsafe.Pointer
+
+// mapdelete should be an internal detail,
+// but widely used packages access it using linkname.
+// Notable members of the hall of shame include:
+//   - github.com/ugorji/go/codec
+//
+// Do not remove or change the type signature.
+// See go.dev/issue/67401.
+//
+//go:linkname mapdelete
+func mapdelete(t *abi.SwissMapType, m *maps.Map, key unsafe.Pointer) {
+	if raceenabled && m != nil {
+		callerpc := sys.GetCallerPC()
+		pc := abi.FuncPCABIInternal(mapdelete)
+		racewritepc(unsafe.Pointer(m), callerpc, pc)
+		raceReadObjectPC(t.Key, key, callerpc, pc)
+	}
+	if msanenabled && m != nil {
+		msanread(key, t.Key.Size_)
+	}
+	if asanenabled && m != nil {
+		asanread(key, t.Key.Size_)
+	}
+
+	m.Delete(t, key)
+}
+
+// mapIterStart initializes the Iter struct used for ranging over maps and
+// performs the first step of iteration. The Iter struct pointed to by 'it' is
+// allocated on the stack by the compiler's order pass or on the heap by
+// reflect. Both need it zeroed, since the struct contains pointers.
+func mapIterStart(t *abi.SwissMapType, m *maps.Map, it *maps.Iter) {
+	if raceenabled && m != nil {
+		callerpc := sys.GetCallerPC()
+		racereadpc(unsafe.Pointer(m), callerpc, abi.FuncPCABIInternal(mapIterStart))
+	}
+
+	it.Init(t, m)
+	it.Next()
+}
+
+// mapIterNext performs the next step of iteration. Afterwards, the next
+// key/elem are in it.Key()/it.Elem().
+func mapIterNext(it *maps.Iter) {
+	if raceenabled {
+		callerpc := sys.GetCallerPC()
+		racereadpc(unsafe.Pointer(it.Map()), callerpc, abi.FuncPCABIInternal(mapIterNext))
+	}
+
+	it.Next()
+}
+
+// mapclear deletes all keys from a map.
+func mapclear(t *abi.SwissMapType, m *maps.Map) {
+	if raceenabled && m != nil {
+		callerpc := sys.GetCallerPC()
+		pc := abi.FuncPCABIInternal(mapclear)
+		racewritepc(unsafe.Pointer(m), callerpc, pc)
+	}
+
+	m.Clear(t)
+}
+
+// Reflect stubs. Called from ../reflect/asm_*.s
+
+// reflect_makemap is for package reflect,
+// but widely used packages access it using linkname.
+// Notable members of the hall of shame include:
+//   - gitee.com/quant1x/gox
+//   - github.com/modern-go/reflect2
+//   - github.com/goccy/go-json
+//   - github.com/RomiChan/protobuf
+//   - github.com/segmentio/encoding
+//   - github.com/v2pro/plz
+//
+// Do not remove or change the type signature.
+// See go.dev/issue/67401.
+//
+//go:linkname reflect_makemap reflect.makemap
+func reflect_makemap(t *abi.SwissMapType, cap int) *maps.Map {
+	// Check invariants and reflect's math.
+	if t.Key.Equal == nil {
+		throw("runtime.reflect_makemap: unsupported map key type")
+	}
+	// TODO: other checks
+
+	return makemap(t, cap, nil)
+}
+
+// reflect_mapaccess is for package reflect,
+// but widely used packages access it using linkname.
+// Notable members of the hall of shame include:
+//   - gitee.com/quant1x/gox
+//   - github.com/modern-go/reflect2
+//   - github.com/v2pro/plz
+//
+// Do not remove or change the type signature.
+// See go.dev/issue/67401.
+//
+//go:linkname reflect_mapaccess reflect.mapaccess
+func reflect_mapaccess(t *abi.SwissMapType, m *maps.Map, key unsafe.Pointer) unsafe.Pointer {
+	elem, ok := mapaccess2(t, m, key)
+	if !ok {
+		// reflect wants nil for a missing element
+		elem = nil
+	}
+	return elem
+}
+
+//go:linkname reflect_mapaccess_faststr reflect.mapaccess_faststr
+func reflect_mapaccess_faststr(t *abi.SwissMapType, m *maps.Map, key string) unsafe.Pointer {
+	elem, ok := mapaccess2_faststr(t, m, key)
+	if !ok {
+		// reflect wants nil for a missing element
+		elem = nil
+	}
+	return elem
+}
+
+// reflect_mapassign is for package reflect,
+// but widely used packages access it using linkname.
+// Notable members of the hall of shame include:
+//   - gitee.com/quant1x/gox
+//   - github.com/v2pro/plz
+//
+// Do not remove or change the type signature.
+//
+//go:linkname reflect_mapassign reflect.mapassign0
+func reflect_mapassign(t *abi.SwissMapType, m *maps.Map, key unsafe.Pointer, elem unsafe.Pointer) {
+	p := mapassign(t, m, key)
+	typedmemmove(t.Elem, p, elem)
+}
+
+//go:linkname reflect_mapassign_faststr reflect.mapassign_faststr0
+func reflect_mapassign_faststr(t *abi.SwissMapType, m *maps.Map, key string, elem unsafe.Pointer) {
+	p := mapassign_faststr(t, m, key)
+	typedmemmove(t.Elem, p, elem)
+}
+
+//go:linkname reflect_mapdelete reflect.mapdelete
+func reflect_mapdelete(t *abi.SwissMapType, m *maps.Map, key unsafe.Pointer) {
+	mapdelete(t, m, key)
+}
+
+//go:linkname reflect_mapdelete_faststr reflect.mapdelete_faststr
+func reflect_mapdelete_faststr(t *abi.SwissMapType, m *maps.Map, key string) {
+	mapdelete_faststr(t, m, key)
+}
+
+// reflect_maplen is for package reflect,
+// but widely used packages access it using linkname.
+// Notable members of the hall of shame include:
+//   - github.com/goccy/go-json
+//   - github.com/wI2L/jettison
+//
+// Do not remove or change the type signature.
+// See go.dev/issue/67401.
+//
+//go:linkname reflect_maplen reflect.maplen
+func reflect_maplen(m *maps.Map) int {
+	if m == nil {
+		return 0
+	}
+	if raceenabled {
+		callerpc := sys.GetCallerPC()
+		racereadpc(unsafe.Pointer(m), callerpc, abi.FuncPCABIInternal(reflect_maplen))
+	}
+	return int(m.Used())
+}
+
+//go:linkname reflect_mapclear reflect.mapclear
+func reflect_mapclear(t *abi.SwissMapType, m *maps.Map) {
+	mapclear(t, m)
+}
+
+//go:linkname reflectlite_maplen internal/reflectlite.maplen
+func reflectlite_maplen(m *maps.Map) int {
+	if m == nil {
+		return 0
+	}
+	if raceenabled {
+		callerpc := sys.GetCallerPC()
+		racereadpc(unsafe.Pointer(m), callerpc, abi.FuncPCABIInternal(reflect_maplen))
+	}
+	return int(m.Used())
+}
+
+// mapinitnoop is a no-op function known to the Go linker; if a given global
+// map (of the right size) is determined to be dead, the linker will
+// rewrite the relocation (from the package init func) from the outlined
+// map init function to this symbol. Defined in assembly so as to avoid
+// complications with instrumentation (coverage, etc).
+func mapinitnoop()
+
+// mapclone for implementing maps.Clone
+//
+//go:linkname mapclone maps.clone
+func mapclone(m any) any {
+	e := efaceOf(&m)
+	typ := (*abi.SwissMapType)(unsafe.Pointer(e._type))
+	map_ := (*maps.Map)(e.data)
+	map_ = map_.Clone(typ)
+	e.data = (unsafe.Pointer)(map_)
+	return m
+}
diff --git a/src/runtime/map_fast32.go b/src/runtime/map_fast32.go
new file mode 100644
index 0000000000..8b460edf77
--- /dev/null
+++ b/src/runtime/map_fast32.go
@@ -0,0 +1,53 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package runtime + +import ( + "internal/abi" + "internal/runtime/maps" + "unsafe" +) + +// Functions below pushed from internal/runtime/maps. + +//go:linkname mapaccess1_fast32 +func mapaccess1_fast32(t *abi.SwissMapType, m *maps.Map, key uint32) unsafe.Pointer + +// mapaccess2_fast32 should be an internal detail, +// but widely used packages access it using linkname. +// Notable members of the hall of shame include: +// - github.com/ugorji/go/codec +// +// Do not remove or change the type signature. +// See go.dev/issue/67401. +// +//go:linkname mapaccess2_fast32 +func mapaccess2_fast32(t *abi.SwissMapType, m *maps.Map, key uint32) (unsafe.Pointer, bool) + +// mapassign_fast32 should be an internal detail, +// but widely used packages access it using linkname. +// Notable members of the hall of shame include: +// - github.com/bytedance/sonic +// - github.com/ugorji/go/codec +// +// Do not remove or change the type signature. +// See go.dev/issue/67401. +// +//go:linkname mapassign_fast32 +func mapassign_fast32(t *abi.SwissMapType, m *maps.Map, key uint32) unsafe.Pointer + +// mapassign_fast32ptr should be an internal detail, +// but widely used packages access it using linkname. +// Notable members of the hall of shame include: +// - github.com/ugorji/go/codec +// +// Do not remove or change the type signature. +// See go.dev/issue/67401. +// +//go:linkname mapassign_fast32ptr +func mapassign_fast32ptr(t *abi.SwissMapType, m *maps.Map, key unsafe.Pointer) unsafe.Pointer + +//go:linkname mapdelete_fast32 +func mapdelete_fast32(t *abi.SwissMapType, m *maps.Map, key uint32) diff --git a/src/runtime/map_fast32_noswiss.go b/src/runtime/map_fast32_noswiss.go deleted file mode 100644 index 751717b6cd..0000000000 --- a/src/runtime/map_fast32_noswiss.go +++ /dev/null @@ -1,493 +0,0 @@ -// Copyright 2018 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build !goexperiment.swissmap - -package runtime - -import ( - "internal/abi" - "internal/goarch" - "internal/runtime/sys" - "unsafe" -) - -func mapaccess1_fast32(t *maptype, h *hmap, key uint32) unsafe.Pointer { - if raceenabled && h != nil { - callerpc := sys.GetCallerPC() - racereadpc(unsafe.Pointer(h), callerpc, abi.FuncPCABIInternal(mapaccess1_fast32)) - } - if h == nil || h.count == 0 { - return unsafe.Pointer(&zeroVal[0]) - } - if h.flags&hashWriting != 0 { - fatal("concurrent map read and map write") - } - var b *bmap - if h.B == 0 { - // One-bucket table. No need to hash. - b = (*bmap)(h.buckets) - } else { - hash := t.Hasher(noescape(unsafe.Pointer(&key)), uintptr(h.hash0)) - m := bucketMask(h.B) - b = (*bmap)(add(h.buckets, (hash&m)*uintptr(t.BucketSize))) - if c := h.oldbuckets; c != nil { - if !h.sameSizeGrow() { - // There used to be half as many buckets; mask down one more power of two. - m >>= 1 - } - oldb := (*bmap)(add(c, (hash&m)*uintptr(t.BucketSize))) - if !evacuated(oldb) { - b = oldb - } - } - } - for ; b != nil; b = b.overflow(t) { - for i, k := uintptr(0), b.keys(); i < abi.OldMapBucketCount; i, k = i+1, add(k, 4) { - if *(*uint32)(k) == key && !isEmpty(b.tophash[i]) { - return add(unsafe.Pointer(b), dataOffset+abi.OldMapBucketCount*4+i*uintptr(t.ValueSize)) - } - } - } - return unsafe.Pointer(&zeroVal[0]) -} - -// mapaccess2_fast32 should be an internal detail, -// but widely used packages access it using linkname. 
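
Some context for the bodyless declarations in the new map_fast32.go: the compiler picks these _fast32 variants statically from the map's key type, so operations on a map[uint32]V never go through the generic mapaccess/mapassign path at all. A hedged sketch of the correspondence; the comments name the calls the compiler is expected to emit, and the exact selection rules live in cmd/compile:

	package main

	import "fmt"

	func main() {
		m := make(map[uint32]string)

		m[7] = "seven" // mapassign_fast32(t, m, 7)
		s := m[7]      // mapaccess1_fast32(t, m, 7)
		v, ok := m[8]  // mapaccess2_fast32(t, m, 8)
		delete(m, 7)   // mapdelete_fast32(t, m, 7)

		fmt.Println(s, v, ok) // prints: seven  false (v is the empty string)
	}
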
-// Notable members of the hall of shame include: -// - github.com/ugorji/go/codec -// -// Do not remove or change the type signature. -// See go.dev/issue/67401. -// -//go:linkname mapaccess2_fast32 -func mapaccess2_fast32(t *maptype, h *hmap, key uint32) (unsafe.Pointer, bool) { - if raceenabled && h != nil { - callerpc := sys.GetCallerPC() - racereadpc(unsafe.Pointer(h), callerpc, abi.FuncPCABIInternal(mapaccess2_fast32)) - } - if h == nil || h.count == 0 { - return unsafe.Pointer(&zeroVal[0]), false - } - if h.flags&hashWriting != 0 { - fatal("concurrent map read and map write") - } - var b *bmap - if h.B == 0 { - // One-bucket table. No need to hash. - b = (*bmap)(h.buckets) - } else { - hash := t.Hasher(noescape(unsafe.Pointer(&key)), uintptr(h.hash0)) - m := bucketMask(h.B) - b = (*bmap)(add(h.buckets, (hash&m)*uintptr(t.BucketSize))) - if c := h.oldbuckets; c != nil { - if !h.sameSizeGrow() { - // There used to be half as many buckets; mask down one more power of two. - m >>= 1 - } - oldb := (*bmap)(add(c, (hash&m)*uintptr(t.BucketSize))) - if !evacuated(oldb) { - b = oldb - } - } - } - for ; b != nil; b = b.overflow(t) { - for i, k := uintptr(0), b.keys(); i < abi.OldMapBucketCount; i, k = i+1, add(k, 4) { - if *(*uint32)(k) == key && !isEmpty(b.tophash[i]) { - return add(unsafe.Pointer(b), dataOffset+abi.OldMapBucketCount*4+i*uintptr(t.ValueSize)), true - } - } - } - return unsafe.Pointer(&zeroVal[0]), false -} - -// mapassign_fast32 should be an internal detail, -// but widely used packages access it using linkname. -// Notable members of the hall of shame include: -// - github.com/bytedance/sonic -// - github.com/ugorji/go/codec -// -// Do not remove or change the type signature. -// See go.dev/issue/67401. -// -//go:linkname mapassign_fast32 -func mapassign_fast32(t *maptype, h *hmap, key uint32) unsafe.Pointer { - if h == nil { - panic(plainError("assignment to entry in nil map")) - } - if raceenabled { - callerpc := sys.GetCallerPC() - racewritepc(unsafe.Pointer(h), callerpc, abi.FuncPCABIInternal(mapassign_fast32)) - } - if h.flags&hashWriting != 0 { - fatal("concurrent map writes") - } - hash := t.Hasher(noescape(unsafe.Pointer(&key)), uintptr(h.hash0)) - - // Set hashWriting after calling t.hasher for consistency with mapassign. - h.flags ^= hashWriting - - if h.buckets == nil { - h.buckets = newobject(t.Bucket) // newarray(t.bucket, 1) - } - -again: - bucket := hash & bucketMask(h.B) - if h.growing() { - growWork_fast32(t, h, bucket) - } - b := (*bmap)(add(h.buckets, bucket*uintptr(t.BucketSize))) - - var insertb *bmap - var inserti uintptr - var insertk unsafe.Pointer - -bucketloop: - for { - for i := uintptr(0); i < abi.OldMapBucketCount; i++ { - if isEmpty(b.tophash[i]) { - if insertb == nil { - inserti = i - insertb = b - } - if b.tophash[i] == emptyRest { - break bucketloop - } - continue - } - k := *((*uint32)(add(unsafe.Pointer(b), dataOffset+i*4))) - if k != key { - continue - } - inserti = i - insertb = b - goto done - } - ovf := b.overflow(t) - if ovf == nil { - break - } - b = ovf - } - - // Did not find mapping for key. Allocate new cell & add entry. - - // If we hit the max load factor or we have too many overflow buckets, - // and we're not already in the middle of growing, start growing. 
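
The h.B == 0 branch in the lookups above deserves a concrete picture: a map with at most eight entries has exactly one bucket, so the runtime skips hashing entirely and scans the slots linearly. A toy version of that degenerate lookup, with an explicit empty mask standing in for the tophash sentinels (illustrative only, not runtime code):

	package main

	import "fmt"

	// lookupOneBucket models the B == 0 fast path: no hash, just a
	// linear scan of the single bucket's eight key slots.
	func lookupOneBucket(keys [8]uint32, empty [8]bool, key uint32) (uint32, bool) {
		for i, k := range keys {
			if k == key && !empty[i] {
				return uint32(i), true
			}
		}
		return 0, false
	}

	func main() {
		keys := [8]uint32{10, 20, 30}
		var empty [8]bool
		empty[3], empty[4], empty[5], empty[6], empty[7] = true, true, true, true, true
		fmt.Println(lookupOneBucket(keys, empty, 20)) // 1 true
		fmt.Println(lookupOneBucket(keys, empty, 99)) // 0 false
	}
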
- if !h.growing() && (overLoadFactor(h.count+1, h.B) || tooManyOverflowBuckets(h.noverflow, h.B)) { - hashGrow(t, h) - goto again // Growing the table invalidates everything, so try again - } - - if insertb == nil { - // The current bucket and all the overflow buckets connected to it are full, allocate a new one. - insertb = h.newoverflow(t, b) - inserti = 0 // not necessary, but avoids needlessly spilling inserti - } - insertb.tophash[inserti&(abi.OldMapBucketCount-1)] = tophash(hash) // mask inserti to avoid bounds checks - - insertk = add(unsafe.Pointer(insertb), dataOffset+inserti*4) - // store new key at insert position - *(*uint32)(insertk) = key - - h.count++ - -done: - elem := add(unsafe.Pointer(insertb), dataOffset+abi.OldMapBucketCount*4+inserti*uintptr(t.ValueSize)) - if h.flags&hashWriting == 0 { - fatal("concurrent map writes") - } - h.flags &^= hashWriting - return elem -} - -// mapassign_fast32ptr should be an internal detail, -// but widely used packages access it using linkname. -// Notable members of the hall of shame include: -// - github.com/ugorji/go/codec -// -// Do not remove or change the type signature. -// See go.dev/issue/67401. -// -//go:linkname mapassign_fast32ptr -func mapassign_fast32ptr(t *maptype, h *hmap, key unsafe.Pointer) unsafe.Pointer { - if h == nil { - panic(plainError("assignment to entry in nil map")) - } - if raceenabled { - callerpc := sys.GetCallerPC() - racewritepc(unsafe.Pointer(h), callerpc, abi.FuncPCABIInternal(mapassign_fast32)) - } - if h.flags&hashWriting != 0 { - fatal("concurrent map writes") - } - hash := t.Hasher(noescape(unsafe.Pointer(&key)), uintptr(h.hash0)) - - // Set hashWriting after calling t.hasher for consistency with mapassign. - h.flags ^= hashWriting - - if h.buckets == nil { - h.buckets = newobject(t.Bucket) // newarray(t.bucket, 1) - } - -again: - bucket := hash & bucketMask(h.B) - if h.growing() { - growWork_fast32(t, h, bucket) - } - b := (*bmap)(add(h.buckets, bucket*uintptr(t.BucketSize))) - - var insertb *bmap - var inserti uintptr - var insertk unsafe.Pointer - -bucketloop: - for { - for i := uintptr(0); i < abi.OldMapBucketCount; i++ { - if isEmpty(b.tophash[i]) { - if insertb == nil { - inserti = i - insertb = b - } - if b.tophash[i] == emptyRest { - break bucketloop - } - continue - } - k := *((*unsafe.Pointer)(add(unsafe.Pointer(b), dataOffset+i*4))) - if k != key { - continue - } - inserti = i - insertb = b - goto done - } - ovf := b.overflow(t) - if ovf == nil { - break - } - b = ovf - } - - // Did not find mapping for key. Allocate new cell & add entry. - - // If we hit the max load factor or we have too many overflow buckets, - // and we're not already in the middle of growing, start growing. - if !h.growing() && (overLoadFactor(h.count+1, h.B) || tooManyOverflowBuckets(h.noverflow, h.B)) { - hashGrow(t, h) - goto again // Growing the table invalidates everything, so try again - } - - if insertb == nil { - // The current bucket and all the overflow buckets connected to it are full, allocate a new one. 
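
The hashWriting handshake bracketing all of these writers is worth spelling out: the bit is checked on entry, toggled with XOR rather than OR, and re-checked before being cleared on exit, so if two writers race, at least one of them observes an inconsistent bit and aborts. It is a best-effort detector, not a lock. A toy model of the protocol with hypothetical names (the real code uses h.flags and the unrecoverable fatal):

	package main

	import "fmt"

	const hashWriting = 4 // mirrors the runtime flag bit

	type toyMap struct{ flags uint8 }

	func (h *toyMap) beginWrite() {
		if h.flags&hashWriting != 0 {
			panic("concurrent map writes") // runtime uses fatal, which cannot be recovered
		}
		h.flags ^= hashWriting // XOR, not OR: a racing second toggle leaves the bit clear...
	}

	func (h *toyMap) endWrite() {
		if h.flags&hashWriting == 0 {
			panic("concurrent map writes") // ...so a racing writer trips this exit check
		}
		h.flags &^= hashWriting
	}

	func main() {
		var m toyMap
		m.beginWrite()
		m.endWrite()
		fmt.Println("single writer: ok")
	}
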
- insertb = h.newoverflow(t, b) - inserti = 0 // not necessary, but avoids needlessly spilling inserti - } - insertb.tophash[inserti&(abi.OldMapBucketCount-1)] = tophash(hash) // mask inserti to avoid bounds checks - - insertk = add(unsafe.Pointer(insertb), dataOffset+inserti*4) - // store new key at insert position - *(*unsafe.Pointer)(insertk) = key - - h.count++ - -done: - elem := add(unsafe.Pointer(insertb), dataOffset+abi.OldMapBucketCount*4+inserti*uintptr(t.ValueSize)) - if h.flags&hashWriting == 0 { - fatal("concurrent map writes") - } - h.flags &^= hashWriting - return elem -} - -func mapdelete_fast32(t *maptype, h *hmap, key uint32) { - if raceenabled && h != nil { - callerpc := sys.GetCallerPC() - racewritepc(unsafe.Pointer(h), callerpc, abi.FuncPCABIInternal(mapdelete_fast32)) - } - if h == nil || h.count == 0 { - return - } - if h.flags&hashWriting != 0 { - fatal("concurrent map writes") - } - - hash := t.Hasher(noescape(unsafe.Pointer(&key)), uintptr(h.hash0)) - - // Set hashWriting after calling t.hasher for consistency with mapdelete - h.flags ^= hashWriting - - bucket := hash & bucketMask(h.B) - if h.growing() { - growWork_fast32(t, h, bucket) - } - b := (*bmap)(add(h.buckets, bucket*uintptr(t.BucketSize))) - bOrig := b -search: - for ; b != nil; b = b.overflow(t) { - for i, k := uintptr(0), b.keys(); i < abi.OldMapBucketCount; i, k = i+1, add(k, 4) { - if key != *(*uint32)(k) || isEmpty(b.tophash[i]) { - continue - } - // Only clear key if there are pointers in it. - // This can only happen if pointers are 32 bit - // wide as 64 bit pointers do not fit into a 32 bit key. - if goarch.PtrSize == 4 && t.Key.Pointers() { - // The key must be a pointer as we checked pointers are - // 32 bits wide and the key is 32 bits wide also. - *(*unsafe.Pointer)(k) = nil - } - e := add(unsafe.Pointer(b), dataOffset+abi.OldMapBucketCount*4+i*uintptr(t.ValueSize)) - if t.Elem.Pointers() { - memclrHasPointers(e, t.Elem.Size_) - } else { - memclrNoHeapPointers(e, t.Elem.Size_) - } - b.tophash[i] = emptyOne - // If the bucket now ends in a bunch of emptyOne states, - // change those to emptyRest states. - if i == abi.OldMapBucketCount-1 { - if b.overflow(t) != nil && b.overflow(t).tophash[0] != emptyRest { - goto notLast - } - } else { - if b.tophash[i+1] != emptyRest { - goto notLast - } - } - for { - b.tophash[i] = emptyRest - if i == 0 { - if b == bOrig { - break // beginning of initial bucket, we're done. - } - // Find previous bucket, continue at its last entry. - c := b - for b = bOrig; b.overflow(t) != c; b = b.overflow(t) { - } - i = abi.OldMapBucketCount - 1 - } else { - i-- - } - if b.tophash[i] != emptyOne { - break - } - } - notLast: - h.count-- - // Reset the hash seed to make it more difficult for attackers to - // repeatedly trigger hash collisions. See issue 25237. 
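
The emptyOne/emptyRest bookkeeping in this delete path is the subtle part: the freed slot is first marked emptyOne, and only if everything after it is already emptyRest does the code walk backwards converting trailing emptyOne slots to emptyRest, which is what lets later lookups stop scanning early. A standalone sketch of that backward sweep for the single-bucket, no-overflow case (toy code, not the runtime's):

	package main

	import "fmt"

	const (
		emptyRest = 0 // empty, and no non-empty slots follow
		emptyOne  = 1 // empty, but later slots may still be in use
	)

	// compact upgrades trailing emptyOne entries to emptyRest after
	// slot i was just emptied, mirroring the sweep in mapdelete_fast32
	// restricted to one bucket with no overflow chain.
	func compact(tophash []uint8, i int) {
		if i != len(tophash)-1 && tophash[i+1] != emptyRest {
			return // a live or emptyOne slot follows; nothing to upgrade yet
		}
		for i >= 0 && tophash[i] == emptyOne {
			tophash[i] = emptyRest
			i--
		}
	}

	func main() {
		// Slots 5..7 already collapsed to emptyRest; deleting slot 4
		// now lets the whole tail collapse as well.
		th := []uint8{9, 9, 9, 9, 9, emptyRest, emptyRest, emptyRest}
		th[4] = emptyOne // the delete marks the slot first...
		compact(th, 4)   // ...then sweeps backwards
		fmt.Println(th)  // [9 9 9 9 0 0 0 0]
	}
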
- if h.count == 0 { - h.hash0 = uint32(rand()) - } - break search - } - } - - if h.flags&hashWriting == 0 { - fatal("concurrent map writes") - } - h.flags &^= hashWriting -} - -func growWork_fast32(t *maptype, h *hmap, bucket uintptr) { - // make sure we evacuate the oldbucket corresponding - // to the bucket we're about to use - evacuate_fast32(t, h, bucket&h.oldbucketmask()) - - // evacuate one more oldbucket to make progress on growing - if h.growing() { - evacuate_fast32(t, h, h.nevacuate) - } -} - -func evacuate_fast32(t *maptype, h *hmap, oldbucket uintptr) { - b := (*bmap)(add(h.oldbuckets, oldbucket*uintptr(t.BucketSize))) - newbit := h.noldbuckets() - if !evacuated(b) { - // TODO: reuse overflow buckets instead of using new ones, if there - // is no iterator using the old buckets. (If !oldIterator.) - - // xy contains the x and y (low and high) evacuation destinations. - var xy [2]evacDst - x := &xy[0] - x.b = (*bmap)(add(h.buckets, oldbucket*uintptr(t.BucketSize))) - x.k = add(unsafe.Pointer(x.b), dataOffset) - x.e = add(x.k, abi.OldMapBucketCount*4) - - if !h.sameSizeGrow() { - // Only calculate y pointers if we're growing bigger. - // Otherwise GC can see bad pointers. - y := &xy[1] - y.b = (*bmap)(add(h.buckets, (oldbucket+newbit)*uintptr(t.BucketSize))) - y.k = add(unsafe.Pointer(y.b), dataOffset) - y.e = add(y.k, abi.OldMapBucketCount*4) - } - - for ; b != nil; b = b.overflow(t) { - k := add(unsafe.Pointer(b), dataOffset) - e := add(k, abi.OldMapBucketCount*4) - for i := 0; i < abi.OldMapBucketCount; i, k, e = i+1, add(k, 4), add(e, uintptr(t.ValueSize)) { - top := b.tophash[i] - if isEmpty(top) { - b.tophash[i] = evacuatedEmpty - continue - } - if top < minTopHash { - throw("bad map state") - } - var useY uint8 - if !h.sameSizeGrow() { - // Compute hash to make our evacuation decision (whether we need - // to send this key/elem to bucket x or bucket y). - hash := t.Hasher(k, uintptr(h.hash0)) - if hash&newbit != 0 { - useY = 1 - } - } - - b.tophash[i] = evacuatedX + useY // evacuatedX + 1 == evacuatedY, enforced in makemap - dst := &xy[useY] // evacuation destination - - if dst.i == abi.OldMapBucketCount { - dst.b = h.newoverflow(t, dst.b) - dst.i = 0 - dst.k = add(unsafe.Pointer(dst.b), dataOffset) - dst.e = add(dst.k, abi.OldMapBucketCount*4) - } - dst.b.tophash[dst.i&(abi.OldMapBucketCount-1)] = top // mask dst.i as an optimization, to avoid a bounds check - - // Copy key. - if goarch.PtrSize == 4 && t.Key.Pointers() && writeBarrier.enabled { - // Write with a write barrier. - *(*unsafe.Pointer)(dst.k) = *(*unsafe.Pointer)(k) - } else { - *(*uint32)(dst.k) = *(*uint32)(k) - } - - typedmemmove(t.Elem, dst.e, e) - dst.i++ - // These updates might push these pointers past the end of the - // key or elem arrays. That's ok, as we have the overflow pointer - // at the end of the bucket to protect against pointing past the - // end of the bucket. - dst.k = add(dst.k, 4) - dst.e = add(dst.e, uintptr(t.ValueSize)) - } - } - // Unlink the overflow buckets & clear key/elem to help GC. - if h.flags&oldIterator == 0 && t.Bucket.Pointers() { - b := add(h.oldbuckets, oldbucket*uintptr(t.BucketSize)) - // Preserve b.tophash because the evacuation - // state is maintained there. 
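
The x/y split driving evacuate_fast32 reduces to one bit of arithmetic: when a table doubles from 2^B to 2^(B+1) buckets, each old bucket n splits into new buckets n (x) and n+2^B (y), and hash & newbit is exactly the bit that decides which half an entry belongs to. A self-contained re-derivation of that arithmetic:

	package main

	import "fmt"

	func main() {
		const oldB = 3               // the table had 2^3 = 8 buckets
		newbit := uintptr(1) << oldB // noldbuckets() == 8

		// Before growing, an entry lives in bucket hash & (newbit - 1).
		// After doubling, the mask gains one bit, so the entry stays at
		// the same index (x) or moves to index+newbit (y), decided
		// entirely by hash & newbit, as in evacuate_fast32.
		for _, hash := range []uintptr{0b01010101, 0b01011101} {
			oldBucket := hash & (newbit - 1)
			useY := hash&newbit != 0
			newBucket := oldBucket
			if useY {
				newBucket += newbit
			}
			fmt.Printf("hash=%08b old bucket=%d new bucket=%d useY=%v\n",
				hash, oldBucket, newBucket, useY)
		}
	}
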
- ptr := add(b, dataOffset) - n := uintptr(t.BucketSize) - dataOffset - memclrHasPointers(ptr, n) - } - } - - if oldbucket == h.nevacuate { - advanceEvacuationMark(h, t, newbit) - } -} diff --git a/src/runtime/map_fast32_swiss.go b/src/runtime/map_fast32_swiss.go deleted file mode 100644 index 0a241d3793..0000000000 --- a/src/runtime/map_fast32_swiss.go +++ /dev/null @@ -1,55 +0,0 @@ -// Copyright 2018 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build goexperiment.swissmap - -package runtime - -import ( - "internal/abi" - "internal/runtime/maps" - "unsafe" -) - -// Functions below pushed from internal/runtime/maps. - -//go:linkname mapaccess1_fast32 -func mapaccess1_fast32(t *abi.SwissMapType, m *maps.Map, key uint32) unsafe.Pointer - -// mapaccess2_fast32 should be an internal detail, -// but widely used packages access it using linkname. -// Notable members of the hall of shame include: -// - github.com/ugorji/go/codec -// -// Do not remove or change the type signature. -// See go.dev/issue/67401. -// -//go:linkname mapaccess2_fast32 -func mapaccess2_fast32(t *abi.SwissMapType, m *maps.Map, key uint32) (unsafe.Pointer, bool) - -// mapassign_fast32 should be an internal detail, -// but widely used packages access it using linkname. -// Notable members of the hall of shame include: -// - github.com/bytedance/sonic -// - github.com/ugorji/go/codec -// -// Do not remove or change the type signature. -// See go.dev/issue/67401. -// -//go:linkname mapassign_fast32 -func mapassign_fast32(t *abi.SwissMapType, m *maps.Map, key uint32) unsafe.Pointer - -// mapassign_fast32ptr should be an internal detail, -// but widely used packages access it using linkname. -// Notable members of the hall of shame include: -// - github.com/ugorji/go/codec -// -// Do not remove or change the type signature. -// See go.dev/issue/67401. -// -//go:linkname mapassign_fast32ptr -func mapassign_fast32ptr(t *abi.SwissMapType, m *maps.Map, key unsafe.Pointer) unsafe.Pointer - -//go:linkname mapdelete_fast32 -func mapdelete_fast32(t *abi.SwissMapType, m *maps.Map, key uint32) diff --git a/src/runtime/map_fast64.go b/src/runtime/map_fast64.go new file mode 100644 index 0000000000..5de22a5bea --- /dev/null +++ b/src/runtime/map_fast64.go @@ -0,0 +1,54 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package runtime + +import ( + "internal/abi" + "internal/runtime/maps" + "unsafe" +) + +// Functions below pushed from internal/runtime/maps. + +//go:linkname mapaccess1_fast64 +func mapaccess1_fast64(t *abi.SwissMapType, m *maps.Map, key uint64) unsafe.Pointer + +// mapaccess2_fast64 should be an internal detail, +// but widely used packages access it using linkname. +// Notable members of the hall of shame include: +// - github.com/ugorji/go/codec +// +// Do not remove or change the type signature. +// See go.dev/issue/67401. +// +//go:linkname mapaccess2_fast64 +func mapaccess2_fast64(t *abi.SwissMapType, m *maps.Map, key uint64) (unsafe.Pointer, bool) + +// mapassign_fast64 should be an internal detail, +// but widely used packages access it using linkname. +// Notable members of the hall of shame include: +// - github.com/bytedance/sonic +// - github.com/ugorji/go/codec +// +// Do not remove or change the type signature. +// See go.dev/issue/67401. 
+// +//go:linkname mapassign_fast64 +func mapassign_fast64(t *abi.SwissMapType, m *maps.Map, key uint64) unsafe.Pointer + +// mapassign_fast64ptr should be an internal detail, +// but widely used packages access it using linkname. +// Notable members of the hall of shame include: +// - github.com/bytedance/sonic +// - github.com/ugorji/go/codec +// +// Do not remove or change the type signature. +// See go.dev/issue/67401. +// +//go:linkname mapassign_fast64ptr +func mapassign_fast64ptr(t *abi.SwissMapType, m *maps.Map, key unsafe.Pointer) unsafe.Pointer + +//go:linkname mapdelete_fast64 +func mapdelete_fast64(t *abi.SwissMapType, m *maps.Map, key uint64) diff --git a/src/runtime/map_fast64_noswiss.go b/src/runtime/map_fast64_noswiss.go deleted file mode 100644 index abb272d2b6..0000000000 --- a/src/runtime/map_fast64_noswiss.go +++ /dev/null @@ -1,502 +0,0 @@ -// Copyright 2018 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build !goexperiment.swissmap - -package runtime - -import ( - "internal/abi" - "internal/goarch" - "internal/runtime/sys" - "unsafe" -) - -func mapaccess1_fast64(t *maptype, h *hmap, key uint64) unsafe.Pointer { - if raceenabled && h != nil { - callerpc := sys.GetCallerPC() - racereadpc(unsafe.Pointer(h), callerpc, abi.FuncPCABIInternal(mapaccess1_fast64)) - } - if h == nil || h.count == 0 { - return unsafe.Pointer(&zeroVal[0]) - } - if h.flags&hashWriting != 0 { - fatal("concurrent map read and map write") - } - var b *bmap - if h.B == 0 { - // One-bucket table. No need to hash. - b = (*bmap)(h.buckets) - } else { - hash := t.Hasher(noescape(unsafe.Pointer(&key)), uintptr(h.hash0)) - m := bucketMask(h.B) - b = (*bmap)(add(h.buckets, (hash&m)*uintptr(t.BucketSize))) - if c := h.oldbuckets; c != nil { - if !h.sameSizeGrow() { - // There used to be half as many buckets; mask down one more power of two. - m >>= 1 - } - oldb := (*bmap)(add(c, (hash&m)*uintptr(t.BucketSize))) - if !evacuated(oldb) { - b = oldb - } - } - } - for ; b != nil; b = b.overflow(t) { - for i, k := uintptr(0), b.keys(); i < abi.OldMapBucketCount; i, k = i+1, add(k, 8) { - if *(*uint64)(k) == key && !isEmpty(b.tophash[i]) { - return add(unsafe.Pointer(b), dataOffset+abi.OldMapBucketCount*8+i*uintptr(t.ValueSize)) - } - } - } - return unsafe.Pointer(&zeroVal[0]) -} - -// mapaccess2_fast64 should be an internal detail, -// but widely used packages access it using linkname. -// Notable members of the hall of shame include: -// - github.com/ugorji/go/codec -// -// Do not remove or change the type signature. -// See go.dev/issue/67401. -// -//go:linkname mapaccess2_fast64 -func mapaccess2_fast64(t *maptype, h *hmap, key uint64) (unsafe.Pointer, bool) { - if raceenabled && h != nil { - callerpc := sys.GetCallerPC() - racereadpc(unsafe.Pointer(h), callerpc, abi.FuncPCABIInternal(mapaccess2_fast64)) - } - if h == nil || h.count == 0 { - return unsafe.Pointer(&zeroVal[0]), false - } - if h.flags&hashWriting != 0 { - fatal("concurrent map read and map write") - } - var b *bmap - if h.B == 0 { - // One-bucket table. No need to hash. - b = (*bmap)(h.buckets) - } else { - hash := t.Hasher(noescape(unsafe.Pointer(&key)), uintptr(h.hash0)) - m := bucketMask(h.B) - b = (*bmap)(add(h.buckets, (hash&m)*uintptr(t.BucketSize))) - if c := h.oldbuckets; c != nil { - if !h.sameSizeGrow() { - // There used to be half as many buckets; mask down one more power of two. 
- m >>= 1 - } - oldb := (*bmap)(add(c, (hash&m)*uintptr(t.BucketSize))) - if !evacuated(oldb) { - b = oldb - } - } - } - for ; b != nil; b = b.overflow(t) { - for i, k := uintptr(0), b.keys(); i < abi.OldMapBucketCount; i, k = i+1, add(k, 8) { - if *(*uint64)(k) == key && !isEmpty(b.tophash[i]) { - return add(unsafe.Pointer(b), dataOffset+abi.OldMapBucketCount*8+i*uintptr(t.ValueSize)), true - } - } - } - return unsafe.Pointer(&zeroVal[0]), false -} - -// mapassign_fast64 should be an internal detail, -// but widely used packages access it using linkname. -// Notable members of the hall of shame include: -// - github.com/bytedance/sonic -// - github.com/ugorji/go/codec -// -// Do not remove or change the type signature. -// See go.dev/issue/67401. -// -//go:linkname mapassign_fast64 -func mapassign_fast64(t *maptype, h *hmap, key uint64) unsafe.Pointer { - if h == nil { - panic(plainError("assignment to entry in nil map")) - } - if raceenabled { - callerpc := sys.GetCallerPC() - racewritepc(unsafe.Pointer(h), callerpc, abi.FuncPCABIInternal(mapassign_fast64)) - } - if h.flags&hashWriting != 0 { - fatal("concurrent map writes") - } - hash := t.Hasher(noescape(unsafe.Pointer(&key)), uintptr(h.hash0)) - - // Set hashWriting after calling t.hasher for consistency with mapassign. - h.flags ^= hashWriting - - if h.buckets == nil { - h.buckets = newobject(t.Bucket) // newarray(t.bucket, 1) - } - -again: - bucket := hash & bucketMask(h.B) - if h.growing() { - growWork_fast64(t, h, bucket) - } - b := (*bmap)(add(h.buckets, bucket*uintptr(t.BucketSize))) - - var insertb *bmap - var inserti uintptr - var insertk unsafe.Pointer - -bucketloop: - for { - for i := uintptr(0); i < abi.OldMapBucketCount; i++ { - if isEmpty(b.tophash[i]) { - if insertb == nil { - insertb = b - inserti = i - } - if b.tophash[i] == emptyRest { - break bucketloop - } - continue - } - k := *((*uint64)(add(unsafe.Pointer(b), dataOffset+i*8))) - if k != key { - continue - } - insertb = b - inserti = i - goto done - } - ovf := b.overflow(t) - if ovf == nil { - break - } - b = ovf - } - - // Did not find mapping for key. Allocate new cell & add entry. - - // If we hit the max load factor or we have too many overflow buckets, - // and we're not already in the middle of growing, start growing. - if !h.growing() && (overLoadFactor(h.count+1, h.B) || tooManyOverflowBuckets(h.noverflow, h.B)) { - hashGrow(t, h) - goto again // Growing the table invalidates everything, so try again - } - - if insertb == nil { - // The current bucket and all the overflow buckets connected to it are full, allocate a new one. - insertb = h.newoverflow(t, b) - inserti = 0 // not necessary, but avoids needlessly spilling inserti - } - insertb.tophash[inserti&(abi.OldMapBucketCount-1)] = tophash(hash) // mask inserti to avoid bounds checks - - insertk = add(unsafe.Pointer(insertb), dataOffset+inserti*8) - // store new key at insert position - *(*uint64)(insertk) = key - - h.count++ - -done: - elem := add(unsafe.Pointer(insertb), dataOffset+abi.OldMapBucketCount*8+inserti*uintptr(t.ValueSize)) - if h.flags&hashWriting == 0 { - fatal("concurrent map writes") - } - h.flags &^= hashWriting - return elem -} - -// mapassign_fast64ptr should be an internal detail, -// but widely used packages access it using linkname. -// Notable members of the hall of shame include: -// - github.com/bytedance/sonic -// - github.com/ugorji/go/codec -// -// Do not remove or change the type signature. -// See go.dev/issue/67401. 
-// -//go:linkname mapassign_fast64ptr -func mapassign_fast64ptr(t *maptype, h *hmap, key unsafe.Pointer) unsafe.Pointer { - if h == nil { - panic(plainError("assignment to entry in nil map")) - } - if raceenabled { - callerpc := sys.GetCallerPC() - racewritepc(unsafe.Pointer(h), callerpc, abi.FuncPCABIInternal(mapassign_fast64)) - } - if h.flags&hashWriting != 0 { - fatal("concurrent map writes") - } - hash := t.Hasher(noescape(unsafe.Pointer(&key)), uintptr(h.hash0)) - - // Set hashWriting after calling t.hasher for consistency with mapassign. - h.flags ^= hashWriting - - if h.buckets == nil { - h.buckets = newobject(t.Bucket) // newarray(t.bucket, 1) - } - -again: - bucket := hash & bucketMask(h.B) - if h.growing() { - growWork_fast64(t, h, bucket) - } - b := (*bmap)(add(h.buckets, bucket*uintptr(t.BucketSize))) - - var insertb *bmap - var inserti uintptr - var insertk unsafe.Pointer - -bucketloop: - for { - for i := uintptr(0); i < abi.OldMapBucketCount; i++ { - if isEmpty(b.tophash[i]) { - if insertb == nil { - insertb = b - inserti = i - } - if b.tophash[i] == emptyRest { - break bucketloop - } - continue - } - k := *((*unsafe.Pointer)(add(unsafe.Pointer(b), dataOffset+i*8))) - if k != key { - continue - } - insertb = b - inserti = i - goto done - } - ovf := b.overflow(t) - if ovf == nil { - break - } - b = ovf - } - - // Did not find mapping for key. Allocate new cell & add entry. - - // If we hit the max load factor or we have too many overflow buckets, - // and we're not already in the middle of growing, start growing. - if !h.growing() && (overLoadFactor(h.count+1, h.B) || tooManyOverflowBuckets(h.noverflow, h.B)) { - hashGrow(t, h) - goto again // Growing the table invalidates everything, so try again - } - - if insertb == nil { - // The current bucket and all the overflow buckets connected to it are full, allocate a new one. - insertb = h.newoverflow(t, b) - inserti = 0 // not necessary, but avoids needlessly spilling inserti - } - insertb.tophash[inserti&(abi.OldMapBucketCount-1)] = tophash(hash) // mask inserti to avoid bounds checks - - insertk = add(unsafe.Pointer(insertb), dataOffset+inserti*8) - // store new key at insert position - *(*unsafe.Pointer)(insertk) = key - - h.count++ - -done: - elem := add(unsafe.Pointer(insertb), dataOffset+abi.OldMapBucketCount*8+inserti*uintptr(t.ValueSize)) - if h.flags&hashWriting == 0 { - fatal("concurrent map writes") - } - h.flags &^= hashWriting - return elem -} - -func mapdelete_fast64(t *maptype, h *hmap, key uint64) { - if raceenabled && h != nil { - callerpc := sys.GetCallerPC() - racewritepc(unsafe.Pointer(h), callerpc, abi.FuncPCABIInternal(mapdelete_fast64)) - } - if h == nil || h.count == 0 { - return - } - if h.flags&hashWriting != 0 { - fatal("concurrent map writes") - } - - hash := t.Hasher(noescape(unsafe.Pointer(&key)), uintptr(h.hash0)) - - // Set hashWriting after calling t.hasher for consistency with mapdelete - h.flags ^= hashWriting - - bucket := hash & bucketMask(h.B) - if h.growing() { - growWork_fast64(t, h, bucket) - } - b := (*bmap)(add(h.buckets, bucket*uintptr(t.BucketSize))) - bOrig := b -search: - for ; b != nil; b = b.overflow(t) { - for i, k := uintptr(0), b.keys(); i < abi.OldMapBucketCount; i, k = i+1, add(k, 8) { - if key != *(*uint64)(k) || isEmpty(b.tophash[i]) { - continue - } - // Only clear key if there are pointers in it. - if t.Key.Pointers() { - if goarch.PtrSize == 8 { - *(*unsafe.Pointer)(k) = nil - } else { - // There are three ways to squeeze at one or more 32 bit pointers into 64 bits. 
- // Just call memclrHasPointers instead of trying to handle all cases here. - memclrHasPointers(k, 8) - } - } - e := add(unsafe.Pointer(b), dataOffset+abi.OldMapBucketCount*8+i*uintptr(t.ValueSize)) - if t.Elem.Pointers() { - memclrHasPointers(e, t.Elem.Size_) - } else { - memclrNoHeapPointers(e, t.Elem.Size_) - } - b.tophash[i] = emptyOne - // If the bucket now ends in a bunch of emptyOne states, - // change those to emptyRest states. - if i == abi.OldMapBucketCount-1 { - if b.overflow(t) != nil && b.overflow(t).tophash[0] != emptyRest { - goto notLast - } - } else { - if b.tophash[i+1] != emptyRest { - goto notLast - } - } - for { - b.tophash[i] = emptyRest - if i == 0 { - if b == bOrig { - break // beginning of initial bucket, we're done. - } - // Find previous bucket, continue at its last entry. - c := b - for b = bOrig; b.overflow(t) != c; b = b.overflow(t) { - } - i = abi.OldMapBucketCount - 1 - } else { - i-- - } - if b.tophash[i] != emptyOne { - break - } - } - notLast: - h.count-- - // Reset the hash seed to make it more difficult for attackers to - // repeatedly trigger hash collisions. See issue 25237. - if h.count == 0 { - h.hash0 = uint32(rand()) - } - break search - } - } - - if h.flags&hashWriting == 0 { - fatal("concurrent map writes") - } - h.flags &^= hashWriting -} - -func growWork_fast64(t *maptype, h *hmap, bucket uintptr) { - // make sure we evacuate the oldbucket corresponding - // to the bucket we're about to use - evacuate_fast64(t, h, bucket&h.oldbucketmask()) - - // evacuate one more oldbucket to make progress on growing - if h.growing() { - evacuate_fast64(t, h, h.nevacuate) - } -} - -func evacuate_fast64(t *maptype, h *hmap, oldbucket uintptr) { - b := (*bmap)(add(h.oldbuckets, oldbucket*uintptr(t.BucketSize))) - newbit := h.noldbuckets() - if !evacuated(b) { - // TODO: reuse overflow buckets instead of using new ones, if there - // is no iterator using the old buckets. (If !oldIterator.) - - // xy contains the x and y (low and high) evacuation destinations. - var xy [2]evacDst - x := &xy[0] - x.b = (*bmap)(add(h.buckets, oldbucket*uintptr(t.BucketSize))) - x.k = add(unsafe.Pointer(x.b), dataOffset) - x.e = add(x.k, abi.OldMapBucketCount*8) - - if !h.sameSizeGrow() { - // Only calculate y pointers if we're growing bigger. - // Otherwise GC can see bad pointers. - y := &xy[1] - y.b = (*bmap)(add(h.buckets, (oldbucket+newbit)*uintptr(t.BucketSize))) - y.k = add(unsafe.Pointer(y.b), dataOffset) - y.e = add(y.k, abi.OldMapBucketCount*8) - } - - for ; b != nil; b = b.overflow(t) { - k := add(unsafe.Pointer(b), dataOffset) - e := add(k, abi.OldMapBucketCount*8) - for i := 0; i < abi.OldMapBucketCount; i, k, e = i+1, add(k, 8), add(e, uintptr(t.ValueSize)) { - top := b.tophash[i] - if isEmpty(top) { - b.tophash[i] = evacuatedEmpty - continue - } - if top < minTopHash { - throw("bad map state") - } - var useY uint8 - if !h.sameSizeGrow() { - // Compute hash to make our evacuation decision (whether we need - // to send this key/elem to bucket x or bucket y). 
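
growWork's policy above, evacuate the old bucket you are about to touch plus one more in order, is what amortizes growth: every write makes progress on at least one bucket beyond its own, so evacuation completes after roughly as many writes as there are old buckets, and no single operation pays for the whole table. A counters-only toy model of that bound (hypothetical structure, not runtime code):

	package main

	import "fmt"

	func main() {
		const noldbuckets = 8
		evacuated := make([]bool, noldbuckets)
		nevacuate := 0 // linear progress counter, as in hmap.nevacuate

		evac := func(i int) {
			evacuated[i] = true
			// advance the progress marker past contiguous finished buckets
			for nevacuate < noldbuckets && evacuated[nevacuate] {
				nevacuate++
			}
		}

		writes := 0
		for hash := 0; nevacuate < noldbuckets; hash++ {
			writes++
			evac(hash % noldbuckets) // the bucket this write touches
			if nevacuate < noldbuckets {
				evac(nevacuate) // plus one more, in order: guaranteed progress
			}
		}
		fmt.Printf("grow finished after %d writes for %d old buckets\n",
			writes, noldbuckets)
	}
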
- hash := t.Hasher(k, uintptr(h.hash0)) - if hash&newbit != 0 { - useY = 1 - } - } - - b.tophash[i] = evacuatedX + useY // evacuatedX + 1 == evacuatedY, enforced in makemap - dst := &xy[useY] // evacuation destination - - if dst.i == abi.OldMapBucketCount { - dst.b = h.newoverflow(t, dst.b) - dst.i = 0 - dst.k = add(unsafe.Pointer(dst.b), dataOffset) - dst.e = add(dst.k, abi.OldMapBucketCount*8) - } - dst.b.tophash[dst.i&(abi.OldMapBucketCount-1)] = top // mask dst.i as an optimization, to avoid a bounds check - - // Copy key. - if t.Key.Pointers() && writeBarrier.enabled { - if goarch.PtrSize == 8 { - // Write with a write barrier. - *(*unsafe.Pointer)(dst.k) = *(*unsafe.Pointer)(k) - } else { - // There are three ways to squeeze at least one 32 bit pointer into 64 bits. - // Give up and call typedmemmove. - typedmemmove(t.Key, dst.k, k) - } - } else { - *(*uint64)(dst.k) = *(*uint64)(k) - } - - typedmemmove(t.Elem, dst.e, e) - dst.i++ - // These updates might push these pointers past the end of the - // key or elem arrays. That's ok, as we have the overflow pointer - // at the end of the bucket to protect against pointing past the - // end of the bucket. - dst.k = add(dst.k, 8) - dst.e = add(dst.e, uintptr(t.ValueSize)) - } - } - // Unlink the overflow buckets & clear key/elem to help GC. - if h.flags&oldIterator == 0 && t.Bucket.Pointers() { - b := add(h.oldbuckets, oldbucket*uintptr(t.BucketSize)) - // Preserve b.tophash because the evacuation - // state is maintained there. - ptr := add(b, dataOffset) - n := uintptr(t.BucketSize) - dataOffset - memclrHasPointers(ptr, n) - } - } - - if oldbucket == h.nevacuate { - advanceEvacuationMark(h, t, newbit) - } -} diff --git a/src/runtime/map_fast64_swiss.go b/src/runtime/map_fast64_swiss.go deleted file mode 100644 index 8b7fcf88e8..0000000000 --- a/src/runtime/map_fast64_swiss.go +++ /dev/null @@ -1,56 +0,0 @@ -// Copyright 2018 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build goexperiment.swissmap - -package runtime - -import ( - "internal/abi" - "internal/runtime/maps" - "unsafe" -) - -// Functions below pushed from internal/runtime/maps. - -//go:linkname mapaccess1_fast64 -func mapaccess1_fast64(t *abi.SwissMapType, m *maps.Map, key uint64) unsafe.Pointer - -// mapaccess2_fast64 should be an internal detail, -// but widely used packages access it using linkname. -// Notable members of the hall of shame include: -// - github.com/ugorji/go/codec -// -// Do not remove or change the type signature. -// See go.dev/issue/67401. -// -//go:linkname mapaccess2_fast64 -func mapaccess2_fast64(t *abi.SwissMapType, m *maps.Map, key uint64) (unsafe.Pointer, bool) - -// mapassign_fast64 should be an internal detail, -// but widely used packages access it using linkname. -// Notable members of the hall of shame include: -// - github.com/bytedance/sonic -// - github.com/ugorji/go/codec -// -// Do not remove or change the type signature. -// See go.dev/issue/67401. -// -//go:linkname mapassign_fast64 -func mapassign_fast64(t *abi.SwissMapType, m *maps.Map, key uint64) unsafe.Pointer - -// mapassign_fast64ptr should be an internal detail, -// but widely used packages access it using linkname. -// Notable members of the hall of shame include: -// - github.com/bytedance/sonic -// - github.com/ugorji/go/codec -// -// Do not remove or change the type signature. -// See go.dev/issue/67401. 
-// -//go:linkname mapassign_fast64ptr -func mapassign_fast64ptr(t *abi.SwissMapType, m *maps.Map, key unsafe.Pointer) unsafe.Pointer - -//go:linkname mapdelete_fast64 -func mapdelete_fast64(t *abi.SwissMapType, m *maps.Map, key uint64) diff --git a/src/runtime/map_faststr.go b/src/runtime/map_faststr.go new file mode 100644 index 0000000000..ca16a8eab6 --- /dev/null +++ b/src/runtime/map_faststr.go @@ -0,0 +1,42 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package runtime + +import ( + "internal/abi" + "internal/runtime/maps" + "unsafe" +) + +// Functions below pushed from internal/runtime/maps. + +//go:linkname mapaccess1_faststr +func mapaccess1_faststr(t *abi.SwissMapType, m *maps.Map, ky string) unsafe.Pointer + +// mapaccess2_faststr should be an internal detail, +// but widely used packages access it using linkname. +// Notable members of the hall of shame include: +// - github.com/ugorji/go/codec +// +// Do not remove or change the type signature. +// See go.dev/issue/67401. +// +//go:linkname mapaccess2_faststr +func mapaccess2_faststr(t *abi.SwissMapType, m *maps.Map, ky string) (unsafe.Pointer, bool) + +// mapassign_faststr should be an internal detail, +// but widely used packages access it using linkname. +// Notable members of the hall of shame include: +// - github.com/bytedance/sonic +// - github.com/ugorji/go/codec +// +// Do not remove or change the type signature. +// See go.dev/issue/67401. +// +//go:linkname mapassign_faststr +func mapassign_faststr(t *abi.SwissMapType, m *maps.Map, s string) unsafe.Pointer + +//go:linkname mapdelete_faststr +func mapdelete_faststr(t *abi.SwissMapType, m *maps.Map, ky string) diff --git a/src/runtime/map_faststr_noswiss.go b/src/runtime/map_faststr_noswiss.go deleted file mode 100644 index e8b6a3f1ae..0000000000 --- a/src/runtime/map_faststr_noswiss.go +++ /dev/null @@ -1,507 +0,0 @@ -// Copyright 2018 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build !goexperiment.swissmap - -package runtime - -import ( - "internal/abi" - "internal/goarch" - "internal/runtime/sys" - "unsafe" -) - -func mapaccess1_faststr(t *maptype, h *hmap, ky string) unsafe.Pointer { - if raceenabled && h != nil { - callerpc := sys.GetCallerPC() - racereadpc(unsafe.Pointer(h), callerpc, abi.FuncPCABIInternal(mapaccess1_faststr)) - } - if h == nil || h.count == 0 { - return unsafe.Pointer(&zeroVal[0]) - } - if h.flags&hashWriting != 0 { - fatal("concurrent map read and map write") - } - key := stringStructOf(&ky) - if h.B == 0 { - // One-bucket table. 
- b := (*bmap)(h.buckets) - if key.len < 32 { - // short key, doing lots of comparisons is ok - for i, kptr := uintptr(0), b.keys(); i < abi.OldMapBucketCount; i, kptr = i+1, add(kptr, 2*goarch.PtrSize) { - k := (*stringStruct)(kptr) - if k.len != key.len || isEmpty(b.tophash[i]) { - if b.tophash[i] == emptyRest { - break - } - continue - } - if k.str == key.str || memequal(k.str, key.str, uintptr(key.len)) { - return add(unsafe.Pointer(b), dataOffset+abi.OldMapBucketCount*2*goarch.PtrSize+i*uintptr(t.ValueSize)) - } - } - return unsafe.Pointer(&zeroVal[0]) - } - // long key, try not to do more comparisons than necessary - keymaybe := uintptr(abi.OldMapBucketCount) - for i, kptr := uintptr(0), b.keys(); i < abi.OldMapBucketCount; i, kptr = i+1, add(kptr, 2*goarch.PtrSize) { - k := (*stringStruct)(kptr) - if k.len != key.len || isEmpty(b.tophash[i]) { - if b.tophash[i] == emptyRest { - break - } - continue - } - if k.str == key.str { - return add(unsafe.Pointer(b), dataOffset+abi.OldMapBucketCount*2*goarch.PtrSize+i*uintptr(t.ValueSize)) - } - // check first 4 bytes - if *((*[4]byte)(key.str)) != *((*[4]byte)(k.str)) { - continue - } - // check last 4 bytes - if *((*[4]byte)(add(key.str, uintptr(key.len)-4))) != *((*[4]byte)(add(k.str, uintptr(key.len)-4))) { - continue - } - if keymaybe != abi.OldMapBucketCount { - // Two keys are potential matches. Use hash to distinguish them. - goto dohash - } - keymaybe = i - } - if keymaybe != abi.OldMapBucketCount { - k := (*stringStruct)(add(unsafe.Pointer(b), dataOffset+keymaybe*2*goarch.PtrSize)) - if memequal(k.str, key.str, uintptr(key.len)) { - return add(unsafe.Pointer(b), dataOffset+abi.OldMapBucketCount*2*goarch.PtrSize+keymaybe*uintptr(t.ValueSize)) - } - } - return unsafe.Pointer(&zeroVal[0]) - } -dohash: - hash := t.Hasher(noescape(unsafe.Pointer(&ky)), uintptr(h.hash0)) - m := bucketMask(h.B) - b := (*bmap)(add(h.buckets, (hash&m)*uintptr(t.BucketSize))) - if c := h.oldbuckets; c != nil { - if !h.sameSizeGrow() { - // There used to be half as many buckets; mask down one more power of two. - m >>= 1 - } - oldb := (*bmap)(add(c, (hash&m)*uintptr(t.BucketSize))) - if !evacuated(oldb) { - b = oldb - } - } - top := tophash(hash) - for ; b != nil; b = b.overflow(t) { - for i, kptr := uintptr(0), b.keys(); i < abi.OldMapBucketCount; i, kptr = i+1, add(kptr, 2*goarch.PtrSize) { - k := (*stringStruct)(kptr) - if k.len != key.len || b.tophash[i] != top { - continue - } - if k.str == key.str || memequal(k.str, key.str, uintptr(key.len)) { - return add(unsafe.Pointer(b), dataOffset+abi.OldMapBucketCount*2*goarch.PtrSize+i*uintptr(t.ValueSize)) - } - } - } - return unsafe.Pointer(&zeroVal[0]) -} - -// mapaccess2_faststr should be an internal detail, -// but widely used packages access it using linkname. -// Notable members of the hall of shame include: -// - github.com/ugorji/go/codec -// -// Do not remove or change the type signature. -// See go.dev/issue/67401. -// -//go:linkname mapaccess2_faststr -func mapaccess2_faststr(t *maptype, h *hmap, ky string) (unsafe.Pointer, bool) { - if raceenabled && h != nil { - callerpc := sys.GetCallerPC() - racereadpc(unsafe.Pointer(h), callerpc, abi.FuncPCABIInternal(mapaccess2_faststr)) - } - if h == nil || h.count == 0 { - return unsafe.Pointer(&zeroVal[0]), false - } - if h.flags&hashWriting != 0 { - fatal("concurrent map read and map write") - } - key := stringStructOf(&ky) - if h.B == 0 { - // One-bucket table. 
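
The long-key path above avoids full memequal calls by filtering on cheap proxies: length, pointer identity, then the first and last four bytes, remembering at most one candidate (keymaybe) and falling back to the hash path the moment a second candidate appears. The same heuristic written down in isolation, with plain strings instead of stringStruct (illustrative only):

	package main

	import "fmt"

	// quickMatch reports whether key plausibly equals k without a full
	// comparison, using the pre-checks from mapaccess1_faststr.
	func quickMatch(k, key string) bool {
		if len(k) != len(key) {
			return false
		}
		if len(key) < 4 {
			return k == key // tiny keys: just compare outright
		}
		if k[:4] != key[:4] { // check first 4 bytes
			return false
		}
		if k[len(k)-4:] != key[len(key)-4:] { // check last 4 bytes
			return false
		}
		return true // potential match: the bucket code remembers it in keymaybe
	}

	func main() {
		key := "interesting-key-00042"
		fmt.Println(quickMatch("interesting-key-00042", key)) // true (real match)
		fmt.Println(quickMatch("interesting-key-00043", key)) // false (last 4 bytes differ)
		fmt.Println(quickMatch("interesting-key-10042", key)) // true: a false positive,
		// which is why at most one candidate is kept and a second
		// candidate forces the hash-based lookup (goto dohash).
	}
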
- b := (*bmap)(h.buckets) - if key.len < 32 { - // short key, doing lots of comparisons is ok - for i, kptr := uintptr(0), b.keys(); i < abi.OldMapBucketCount; i, kptr = i+1, add(kptr, 2*goarch.PtrSize) { - k := (*stringStruct)(kptr) - if k.len != key.len || isEmpty(b.tophash[i]) { - if b.tophash[i] == emptyRest { - break - } - continue - } - if k.str == key.str || memequal(k.str, key.str, uintptr(key.len)) { - return add(unsafe.Pointer(b), dataOffset+abi.OldMapBucketCount*2*goarch.PtrSize+i*uintptr(t.ValueSize)), true - } - } - return unsafe.Pointer(&zeroVal[0]), false - } - // long key, try not to do more comparisons than necessary - keymaybe := uintptr(abi.OldMapBucketCount) - for i, kptr := uintptr(0), b.keys(); i < abi.OldMapBucketCount; i, kptr = i+1, add(kptr, 2*goarch.PtrSize) { - k := (*stringStruct)(kptr) - if k.len != key.len || isEmpty(b.tophash[i]) { - if b.tophash[i] == emptyRest { - break - } - continue - } - if k.str == key.str { - return add(unsafe.Pointer(b), dataOffset+abi.OldMapBucketCount*2*goarch.PtrSize+i*uintptr(t.ValueSize)), true - } - // check first 4 bytes - if *((*[4]byte)(key.str)) != *((*[4]byte)(k.str)) { - continue - } - // check last 4 bytes - if *((*[4]byte)(add(key.str, uintptr(key.len)-4))) != *((*[4]byte)(add(k.str, uintptr(key.len)-4))) { - continue - } - if keymaybe != abi.OldMapBucketCount { - // Two keys are potential matches. Use hash to distinguish them. - goto dohash - } - keymaybe = i - } - if keymaybe != abi.OldMapBucketCount { - k := (*stringStruct)(add(unsafe.Pointer(b), dataOffset+keymaybe*2*goarch.PtrSize)) - if memequal(k.str, key.str, uintptr(key.len)) { - return add(unsafe.Pointer(b), dataOffset+abi.OldMapBucketCount*2*goarch.PtrSize+keymaybe*uintptr(t.ValueSize)), true - } - } - return unsafe.Pointer(&zeroVal[0]), false - } -dohash: - hash := t.Hasher(noescape(unsafe.Pointer(&ky)), uintptr(h.hash0)) - m := bucketMask(h.B) - b := (*bmap)(add(h.buckets, (hash&m)*uintptr(t.BucketSize))) - if c := h.oldbuckets; c != nil { - if !h.sameSizeGrow() { - // There used to be half as many buckets; mask down one more power of two. - m >>= 1 - } - oldb := (*bmap)(add(c, (hash&m)*uintptr(t.BucketSize))) - if !evacuated(oldb) { - b = oldb - } - } - top := tophash(hash) - for ; b != nil; b = b.overflow(t) { - for i, kptr := uintptr(0), b.keys(); i < abi.OldMapBucketCount; i, kptr = i+1, add(kptr, 2*goarch.PtrSize) { - k := (*stringStruct)(kptr) - if k.len != key.len || b.tophash[i] != top { - continue - } - if k.str == key.str || memequal(k.str, key.str, uintptr(key.len)) { - return add(unsafe.Pointer(b), dataOffset+abi.OldMapBucketCount*2*goarch.PtrSize+i*uintptr(t.ValueSize)), true - } - } - } - return unsafe.Pointer(&zeroVal[0]), false -} - -// mapassign_faststr should be an internal detail, -// but widely used packages access it using linkname. -// Notable members of the hall of shame include: -// - github.com/bytedance/sonic -// - github.com/ugorji/go/codec -// -// Do not remove or change the type signature. -// See go.dev/issue/67401. 
-// -//go:linkname mapassign_faststr -func mapassign_faststr(t *maptype, h *hmap, s string) unsafe.Pointer { - if h == nil { - panic(plainError("assignment to entry in nil map")) - } - if raceenabled { - callerpc := sys.GetCallerPC() - racewritepc(unsafe.Pointer(h), callerpc, abi.FuncPCABIInternal(mapassign_faststr)) - } - if h.flags&hashWriting != 0 { - fatal("concurrent map writes") - } - key := stringStructOf(&s) - hash := t.Hasher(noescape(unsafe.Pointer(&s)), uintptr(h.hash0)) - - // Set hashWriting after calling t.hasher for consistency with mapassign. - h.flags ^= hashWriting - - if h.buckets == nil { - h.buckets = newobject(t.Bucket) // newarray(t.bucket, 1) - } - -again: - bucket := hash & bucketMask(h.B) - if h.growing() { - growWork_faststr(t, h, bucket) - } - b := (*bmap)(add(h.buckets, bucket*uintptr(t.BucketSize))) - top := tophash(hash) - - var insertb *bmap - var inserti uintptr - var insertk unsafe.Pointer - -bucketloop: - for { - for i := uintptr(0); i < abi.OldMapBucketCount; i++ { - if b.tophash[i] != top { - if isEmpty(b.tophash[i]) && insertb == nil { - insertb = b - inserti = i - } - if b.tophash[i] == emptyRest { - break bucketloop - } - continue - } - k := (*stringStruct)(add(unsafe.Pointer(b), dataOffset+i*2*goarch.PtrSize)) - if k.len != key.len { - continue - } - if k.str != key.str && !memequal(k.str, key.str, uintptr(key.len)) { - continue - } - // already have a mapping for key. Update it. - inserti = i - insertb = b - // Overwrite existing key, so it can be garbage collected. - // The size is already guaranteed to be set correctly. - k.str = key.str - goto done - } - ovf := b.overflow(t) - if ovf == nil { - break - } - b = ovf - } - - // Did not find mapping for key. Allocate new cell & add entry. - - // If we hit the max load factor or we have too many overflow buckets, - // and we're not already in the middle of growing, start growing. - if !h.growing() && (overLoadFactor(h.count+1, h.B) || tooManyOverflowBuckets(h.noverflow, h.B)) { - hashGrow(t, h) - goto again // Growing the table invalidates everything, so try again - } - - if insertb == nil { - // The current bucket and all the overflow buckets connected to it are full, allocate a new one. 
- insertb = h.newoverflow(t, b) - inserti = 0 // not necessary, but avoids needlessly spilling inserti - } - insertb.tophash[inserti&(abi.OldMapBucketCount-1)] = top // mask inserti to avoid bounds checks - - insertk = add(unsafe.Pointer(insertb), dataOffset+inserti*2*goarch.PtrSize) - // store new key at insert position - *((*stringStruct)(insertk)) = *key - h.count++ - -done: - elem := add(unsafe.Pointer(insertb), dataOffset+abi.OldMapBucketCount*2*goarch.PtrSize+inserti*uintptr(t.ValueSize)) - if h.flags&hashWriting == 0 { - fatal("concurrent map writes") - } - h.flags &^= hashWriting - return elem -} - -func mapdelete_faststr(t *maptype, h *hmap, ky string) { - if raceenabled && h != nil { - callerpc := sys.GetCallerPC() - racewritepc(unsafe.Pointer(h), callerpc, abi.FuncPCABIInternal(mapdelete_faststr)) - } - if h == nil || h.count == 0 { - return - } - if h.flags&hashWriting != 0 { - fatal("concurrent map writes") - } - - key := stringStructOf(&ky) - hash := t.Hasher(noescape(unsafe.Pointer(&ky)), uintptr(h.hash0)) - - // Set hashWriting after calling t.hasher for consistency with mapdelete - h.flags ^= hashWriting - - bucket := hash & bucketMask(h.B) - if h.growing() { - growWork_faststr(t, h, bucket) - } - b := (*bmap)(add(h.buckets, bucket*uintptr(t.BucketSize))) - bOrig := b - top := tophash(hash) -search: - for ; b != nil; b = b.overflow(t) { - for i, kptr := uintptr(0), b.keys(); i < abi.OldMapBucketCount; i, kptr = i+1, add(kptr, 2*goarch.PtrSize) { - k := (*stringStruct)(kptr) - if k.len != key.len || b.tophash[i] != top { - continue - } - if k.str != key.str && !memequal(k.str, key.str, uintptr(key.len)) { - continue - } - // Clear key's pointer. - k.str = nil - e := add(unsafe.Pointer(b), dataOffset+abi.OldMapBucketCount*2*goarch.PtrSize+i*uintptr(t.ValueSize)) - if t.Elem.Pointers() { - memclrHasPointers(e, t.Elem.Size_) - } else { - memclrNoHeapPointers(e, t.Elem.Size_) - } - b.tophash[i] = emptyOne - // If the bucket now ends in a bunch of emptyOne states, - // change those to emptyRest states. - if i == abi.OldMapBucketCount-1 { - if b.overflow(t) != nil && b.overflow(t).tophash[0] != emptyRest { - goto notLast - } - } else { - if b.tophash[i+1] != emptyRest { - goto notLast - } - } - for { - b.tophash[i] = emptyRest - if i == 0 { - if b == bOrig { - break // beginning of initial bucket, we're done. - } - // Find previous bucket, continue at its last entry. - c := b - for b = bOrig; b.overflow(t) != c; b = b.overflow(t) { - } - i = abi.OldMapBucketCount - 1 - } else { - i-- - } - if b.tophash[i] != emptyOne { - break - } - } - notLast: - h.count-- - // Reset the hash seed to make it more difficult for attackers to - // repeatedly trigger hash collisions. See issue 25237. - if h.count == 0 { - h.hash0 = uint32(rand()) - } - break search - } - } - - if h.flags&hashWriting == 0 { - fatal("concurrent map writes") - } - h.flags &^= hashWriting -} - -func growWork_faststr(t *maptype, h *hmap, bucket uintptr) { - // make sure we evacuate the oldbucket corresponding - // to the bucket we're about to use - evacuate_faststr(t, h, bucket&h.oldbucketmask()) - - // evacuate one more oldbucket to make progress on growing - if h.growing() { - evacuate_faststr(t, h, h.nevacuate) - } -} - -func evacuate_faststr(t *maptype, h *hmap, oldbucket uintptr) { - b := (*bmap)(add(h.oldbuckets, oldbucket*uintptr(t.BucketSize))) - newbit := h.noldbuckets() - if !evacuated(b) { - // TODO: reuse overflow buckets instead of using new ones, if there - // is no iterator using the old buckets. 
(If !oldIterator.) - - // xy contains the x and y (low and high) evacuation destinations. - var xy [2]evacDst - x := &xy[0] - x.b = (*bmap)(add(h.buckets, oldbucket*uintptr(t.BucketSize))) - x.k = add(unsafe.Pointer(x.b), dataOffset) - x.e = add(x.k, abi.OldMapBucketCount*2*goarch.PtrSize) - - if !h.sameSizeGrow() { - // Only calculate y pointers if we're growing bigger. - // Otherwise GC can see bad pointers. - y := &xy[1] - y.b = (*bmap)(add(h.buckets, (oldbucket+newbit)*uintptr(t.BucketSize))) - y.k = add(unsafe.Pointer(y.b), dataOffset) - y.e = add(y.k, abi.OldMapBucketCount*2*goarch.PtrSize) - } - - for ; b != nil; b = b.overflow(t) { - k := add(unsafe.Pointer(b), dataOffset) - e := add(k, abi.OldMapBucketCount*2*goarch.PtrSize) - for i := 0; i < abi.OldMapBucketCount; i, k, e = i+1, add(k, 2*goarch.PtrSize), add(e, uintptr(t.ValueSize)) { - top := b.tophash[i] - if isEmpty(top) { - b.tophash[i] = evacuatedEmpty - continue - } - if top < minTopHash { - throw("bad map state") - } - var useY uint8 - if !h.sameSizeGrow() { - // Compute hash to make our evacuation decision (whether we need - // to send this key/elem to bucket x or bucket y). - hash := t.Hasher(k, uintptr(h.hash0)) - if hash&newbit != 0 { - useY = 1 - } - } - - b.tophash[i] = evacuatedX + useY // evacuatedX + 1 == evacuatedY, enforced in makemap - dst := &xy[useY] // evacuation destination - - if dst.i == abi.OldMapBucketCount { - dst.b = h.newoverflow(t, dst.b) - dst.i = 0 - dst.k = add(unsafe.Pointer(dst.b), dataOffset) - dst.e = add(dst.k, abi.OldMapBucketCount*2*goarch.PtrSize) - } - dst.b.tophash[dst.i&(abi.OldMapBucketCount-1)] = top // mask dst.i as an optimization, to avoid a bounds check - - // Copy key. - *(*string)(dst.k) = *(*string)(k) - - typedmemmove(t.Elem, dst.e, e) - dst.i++ - // These updates might push these pointers past the end of the - // key or elem arrays. That's ok, as we have the overflow pointer - // at the end of the bucket to protect against pointing past the - // end of the bucket. - dst.k = add(dst.k, 2*goarch.PtrSize) - dst.e = add(dst.e, uintptr(t.ValueSize)) - } - } - // Unlink the overflow buckets & clear key/elem to help GC. - if h.flags&oldIterator == 0 && t.Bucket.Pointers() { - b := add(h.oldbuckets, oldbucket*uintptr(t.BucketSize)) - // Preserve b.tophash because the evacuation - // state is maintained there. - ptr := add(b, dataOffset) - n := uintptr(t.BucketSize) - dataOffset - memclrHasPointers(ptr, n) - } - } - - if oldbucket == h.nevacuate { - advanceEvacuationMark(h, t, newbit) - } -} diff --git a/src/runtime/map_faststr_swiss.go b/src/runtime/map_faststr_swiss.go deleted file mode 100644 index 23f6c1e810..0000000000 --- a/src/runtime/map_faststr_swiss.go +++ /dev/null @@ -1,44 +0,0 @@ -// Copyright 2018 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build goexperiment.swissmap - -package runtime - -import ( - "internal/abi" - "internal/runtime/maps" - "unsafe" -) - -// Functions below pushed from internal/runtime/maps. - -//go:linkname mapaccess1_faststr -func mapaccess1_faststr(t *abi.SwissMapType, m *maps.Map, ky string) unsafe.Pointer - -// mapaccess2_faststr should be an internal detail, -// but widely used packages access it using linkname. -// Notable members of the hall of shame include: -// - github.com/ugorji/go/codec -// -// Do not remove or change the type signature. -// See go.dev/issue/67401. 
-// -//go:linkname mapaccess2_faststr -func mapaccess2_faststr(t *abi.SwissMapType, m *maps.Map, ky string) (unsafe.Pointer, bool) - -// mapassign_faststr should be an internal detail, -// but widely used packages access it using linkname. -// Notable members of the hall of shame include: -// - github.com/bytedance/sonic -// - github.com/ugorji/go/codec -// -// Do not remove or change the type signature. -// See go.dev/issue/67401. -// -//go:linkname mapassign_faststr -func mapassign_faststr(t *abi.SwissMapType, m *maps.Map, s string) unsafe.Pointer - -//go:linkname mapdelete_faststr -func mapdelete_faststr(t *abi.SwissMapType, m *maps.Map, ky string) diff --git a/src/runtime/map_noswiss.go b/src/runtime/map_noswiss.go deleted file mode 100644 index 7b3c98eb88..0000000000 --- a/src/runtime/map_noswiss.go +++ /dev/null @@ -1,1891 +0,0 @@ -// Copyright 2014 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build !goexperiment.swissmap - -package runtime - -// This file contains the implementation of Go's map type. -// -// A map is just a hash table. The data is arranged -// into an array of buckets. Each bucket contains up to -// 8 key/elem pairs. The low-order bits of the hash are -// used to select a bucket. Each bucket contains a few -// high-order bits of each hash to distinguish the entries -// within a single bucket. -// -// If more than 8 keys hash to a bucket, we chain on -// extra buckets. -// -// When the hashtable grows, we allocate a new array -// of buckets twice as big. Buckets are incrementally -// copied from the old bucket array to the new bucket array. -// -// Map iterators walk through the array of buckets and -// return the keys in walk order (bucket #, then overflow -// chain order, then bucket index). To maintain iteration -// semantics, we never move keys within their bucket (if -// we did, keys might be returned 0 or 2 times). When -// growing the table, iterators remain iterating through the -// old table and must check the new table if the bucket -// they are iterating through has been moved ("evacuated") -// to the new table. - -// Picking loadFactor: too large and we have lots of overflow -// buckets, too small and we waste a lot of space. I wrote -// a simple program to check some stats for different loads: -// (64-bit, 8 byte keys and elems) -// loadFactor %overflow bytes/entry hitprobe missprobe -// 4.00 2.13 20.77 3.00 4.00 -// 4.50 4.05 17.30 3.25 4.50 -// 5.00 6.85 14.77 3.50 5.00 -// 5.50 10.55 12.94 3.75 5.50 -// 6.00 15.27 11.67 4.00 6.00 -// 6.50 20.90 10.79 4.25 6.50 -// 7.00 27.14 10.15 4.50 7.00 -// 7.50 34.03 9.73 4.75 7.50 -// 8.00 41.10 9.40 5.00 8.00 -// -// %overflow = percentage of buckets which have an overflow bucket -// bytes/entry = overhead bytes used per key/elem pair -// hitprobe = # of entries to check when looking up a present key -// missprobe = # of entries to check when looking up an absent key -// -// Keep in mind this data is for maximally loaded tables, i.e. just -// before the table grows. Typical tables will be somewhat less loaded. - -import ( - "internal/abi" - "internal/goarch" - "internal/runtime/atomic" - "internal/runtime/maps" - "internal/runtime/math" - "internal/runtime/sys" - "unsafe" -) - -type maptype = abi.OldMapType - -const ( - // Maximum number of key/elem pairs a bucket can hold. 
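
To connect that table to the constants defined just below it: the chosen threshold is loadFactorNum/loadFactorDen = (2*8*13/16)/2 = 6.5 average entries per 8-slot bucket, i.e. growth triggers at 81.25% occupancy, between the 6.50 and 7.00 rows of the table. A sketch of the trigger predicate as those constants define it (re-derived here for illustration; the runtime's overLoadFactor has the same shape):

	package main

	import "fmt"

	const (
		bucketCnt     = 8
		loadFactorDen = 2
		loadFactorNum = loadFactorDen * bucketCnt * 13 / 16 // == 13
	)

	// overLoadFactor mirrors the runtime predicate: grow when
	// count > 6.5 * 2^B, using integer math only. Maps with at most
	// bucketCnt entries never grow.
	func overLoadFactor(count int, B uint8) bool {
		return count > bucketCnt &&
			uintptr(count) > loadFactorNum*((uintptr(1)<<B)/loadFactorDen)
	}

	func main() {
		// With B=4 there are 16 buckets and 6.5*16 = 104 is the limit.
		fmt.Println(overLoadFactor(104, 4)) // false: exactly at the limit
		fmt.Println(overLoadFactor(105, 4)) // true: the 105th entry triggers doubling
	}
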
- bucketCntBits = abi.OldMapBucketCountBits - - // Maximum average load of a bucket that triggers growth is bucketCnt*13/16 (about 80% full) - // Because of minimum alignment rules, bucketCnt is known to be at least 8. - // Represent as loadFactorNum/loadFactorDen, to allow integer math. - loadFactorDen = 2 - loadFactorNum = loadFactorDen * abi.OldMapBucketCount * 13 / 16 - - // data offset should be the size of the bmap struct, but needs to be - // aligned correctly. For amd64p32 this means 64-bit alignment - // even though pointers are 32 bit. - dataOffset = unsafe.Offsetof(struct { - b bmap - v int64 - }{}.v) - - // Possible tophash values. We reserve a few possibilities for special marks. - // Each bucket (including its overflow buckets, if any) will have either all or none of its - // entries in the evacuated* states (except during the evacuate() method, which only happens - // during map writes and thus no one else can observe the map during that time). - emptyRest = 0 // this cell is empty, and there are no more non-empty cells at higher indexes or overflows. - emptyOne = 1 // this cell is empty - evacuatedX = 2 // key/elem is valid. Entry has been evacuated to first half of larger table. - evacuatedY = 3 // same as above, but evacuated to second half of larger table. - evacuatedEmpty = 4 // cell is empty, bucket is evacuated. - minTopHash = 5 // minimum tophash for a normal filled cell. - - // flags - iterator = 1 // there may be an iterator using buckets - oldIterator = 2 // there may be an iterator using oldbuckets - hashWriting = 4 // a goroutine is writing to the map - sameSizeGrow = 8 // the current map growth is to a new map of the same size - - // sentinel bucket ID for iterator checks - noCheck = 1<<(8*goarch.PtrSize) - 1 -) - -// isEmpty reports whether the given tophash array entry represents an empty bucket entry. -func isEmpty(x uint8) bool { - return x <= emptyOne -} - -// A header for a Go map. -type hmap struct { - // Note: the format of the hmap is also encoded in cmd/compile/internal/reflectdata/reflect.go. - // Make sure this stays in sync with the compiler's definition. - count int // # live cells == size of map. Must be first (used by len() builtin) - flags uint8 - B uint8 // log_2 of # of buckets (can hold up to loadFactor * 2^B items) - noverflow uint16 // approximate number of overflow buckets; see incrnoverflow for details - hash0 uint32 // hash seed - - buckets unsafe.Pointer // array of 2^B Buckets. may be nil if count==0. - oldbuckets unsafe.Pointer // previous bucket array of half the size, non-nil only when growing - nevacuate uintptr // progress counter for evacuation (buckets less than this have been evacuated) - clearSeq uint64 - - extra *mapextra // optional fields -} - -// mapextra holds fields that are not present on all maps. -type mapextra struct { - // If both key and elem do not contain pointers and are inline, then we mark bucket - // type as containing no pointers. This avoids scanning such maps. - // However, bmap.overflow is a pointer. In order to keep overflow buckets - // alive, we store pointers to all overflow buckets in hmap.extra.overflow and hmap.extra.oldoverflow. - // overflow and oldoverflow are only used if key and elem do not contain pointers. - // overflow contains overflow buckets for hmap.buckets. - // oldoverflow contains overflow buckets for hmap.oldbuckets. - // The indirection allows to store a pointer to the slice in hiter. 
-	overflow    *[]*bmap
-	oldoverflow *[]*bmap
-
-	// nextOverflow holds a pointer to a free overflow bucket.
-	nextOverflow *bmap
-}
-
-// A bucket for a Go map.
-type bmap struct {
-	// tophash generally contains the top byte of the hash value
-	// for each key in this bucket. If tophash[0] < minTopHash,
-	// tophash[0] is a bucket evacuation state instead.
-	tophash [abi.OldMapBucketCount]uint8
-	// Followed by bucketCnt keys and then bucketCnt elems.
-	// NOTE: packing all the keys together and then all the elems together makes the
-	// code a bit more complicated than alternating key/elem/key/elem/... but it allows
-	// us to eliminate padding which would be needed for, e.g., map[int64]int8.
-	// Followed by an overflow pointer.
-}
-
-// A hash iteration structure.
-// If you modify hiter, also change cmd/compile/internal/reflectdata/reflect.go
-// and reflect/value.go to match the layout of this structure.
-type hiter struct {
-	key         unsafe.Pointer // Must be in first position. Write nil to indicate iteration end (see cmd/compile/internal/walk/range.go).
-	elem        unsafe.Pointer // Must be in second position (see cmd/compile/internal/walk/range.go).
-	t           *maptype
-	h           *hmap
-	buckets     unsafe.Pointer // bucket ptr at hash_iter initialization time
-	bptr        *bmap          // current bucket
-	overflow    *[]*bmap       // keeps overflow buckets of hmap.buckets alive
-	oldoverflow *[]*bmap       // keeps overflow buckets of hmap.oldbuckets alive
-	startBucket uintptr        // bucket iteration started at
-	offset      uint8          // intra-bucket offset to start from during iteration (should be big enough to hold bucketCnt-1)
-	wrapped     bool           // already wrapped around from end of bucket array to beginning
-	B           uint8
-	i           uint8
-	bucket      uintptr
-	checkBucket uintptr
-	clearSeq    uint64
-}
-
-// bucketShift returns 1<<b, optimized for code generation.
-func bucketShift(b uint8) uintptr {
-	// Masking the shift amount allows overflow checks to be elided.
-	return uintptr(1) << (b & (goarch.PtrSize*8 - 1))
-}
-
-// bucketMask returns 1<<b - 1, optimized for code generation.
-func bucketMask(b uint8) uintptr {
-	return bucketShift(b) - 1
-}
-
-// tophash calculates the tophash value for hash.
-func tophash(hash uintptr) uint8 {
-	top := uint8(hash >> (goarch.PtrSize*8 - 8))
-	if top < minTopHash {
-		top += minTopHash
-	}
-	return top
-}
-
-func evacuated(b *bmap) bool {
-	h := b.tophash[0]
-	return h > emptyOne && h < minTopHash
-}
-
-func (b *bmap) overflow(t *maptype) *bmap {
-	return *(**bmap)(add(unsafe.Pointer(b), uintptr(t.BucketSize)-goarch.PtrSize))
-}
-
-func (b *bmap) setoverflow(t *maptype, ovf *bmap) {
-	*(**bmap)(add(unsafe.Pointer(b), uintptr(t.BucketSize)-goarch.PtrSize)) = ovf
-}
-
-func (b *bmap) keys() unsafe.Pointer {
-	return add(unsafe.Pointer(b), dataOffset)
-}
-
-// incrnoverflow increments h.noverflow.
-// noverflow counts the number of overflow buckets.
-// This is used to trigger same-size map growth.
-// See also tooManyOverflowBuckets.
-// To keep hmap small, noverflow is a uint16.
-// When there are few buckets, noverflow is an exact count.
-// When there are many buckets, noverflow is an approximate count.
-func (h *hmap) incrnoverflow() {
-	// We trigger same-size map growth if there are
-	// as many overflow buckets as buckets.
-	// We need to be able to count to 1<<h.B.
-	if h.B < 16 {
-		h.noverflow++
-		return
-	}
-	// Increment with probability 1/(1<<(h.B-15)).
-	// When we reach 1<<15 - 1, we will have approximately
-	// as many overflow buckets as buckets.
-	mask := uint32(1)<<(h.B-15) - 1
-	// Example: if h.B == 18, then mask == 7,
-	// and rand() & 7 == 0 with probability 1/8.
-	if uint32(rand())&mask == 0 {
-		h.noverflow++
-	}
-}
-
-func (h *hmap) newoverflow(t *maptype, b *bmap) *bmap {
-	var ovf *bmap
-	if h.extra != nil && h.extra.nextOverflow != nil {
-		// We have preallocated overflow buckets available.
-		// See makeBucketArray for more details.
-		ovf = h.extra.nextOverflow
-		if ovf.overflow(t) == nil {
-			// We're not at the end of the preallocated overflow buckets. Bump the pointer.
-			h.extra.nextOverflow = (*bmap)(add(unsafe.Pointer(ovf), uintptr(t.BucketSize)))
-		} else {
-			// This is the last preallocated overflow bucket.
-			// Reset the overflow pointer on this bucket,
-			// which was set to a non-nil sentinel value.
-			ovf.setoverflow(t, nil)
-			h.extra.nextOverflow = nil
-		}
-	} else {
-		ovf = (*bmap)(newobject(t.Bucket))
-	}
-	h.incrnoverflow()
-	if !t.Bucket.Pointers() {
-		h.createOverflow()
-		*h.extra.overflow = append(*h.extra.overflow, ovf)
-	}
-	b.setoverflow(t, ovf)
-	return ovf
-}
-
-func (h *hmap) createOverflow() {
-	if h.extra == nil {
-		h.extra = new(mapextra)
-	}
-	if h.extra.overflow == nil {
-		h.extra.overflow = new([]*bmap)
-	}
-}
-
-func makemap64(t *maptype, hint int64, h *hmap) *hmap {
-	if int64(int(hint)) != hint {
-		hint = 0
-	}
-	return makemap(t, int(hint), h)
-}
-
-// makemap_small implements Go map creation for make(map[k]v) and
-// make(map[k]v, hint) when hint is known to be at most bucketCnt
-// at compile time and the map needs to be allocated on the heap.
-//
-// makemap_small should be an internal detail,
-// but widely used packages access it using linkname.
-// Notable members of the hall of shame include:
-//   - github.com/bytedance/sonic
-//
-// Do not remove or change the type signature.
-// See go.dev/issue/67401.
-//
-//go:linkname makemap_small
-func makemap_small() *hmap {
-	h := new(hmap)
-	h.hash0 = uint32(rand())
-	return h
-}
-
-// makemap implements Go map creation for make(map[k]v, hint).
-// If the compiler has determined that the map or the first bucket
-// can be created on the stack, h and/or bucket may be non-nil.
-// If h != nil, the map can be created directly in h.
-// If h.buckets != nil, bucket pointed to can be used as the first bucket.
-//
-// makemap should be an internal detail,
-// but widely used packages access it using linkname.
-// Notable members of the hall of shame include:
-//   - github.com/ugorji/go/codec
-//
-// Do not remove or change the type signature.
-// See go.dev/issue/67401.
-//
-//go:linkname makemap
-func makemap(t *maptype, hint int, h *hmap) *hmap {
-	if hint < 0 || hint > maxAlloc {
-		hint = 0
-	}
-
-	// initialize Hmap
-	if h == nil {
-		h = new(hmap)
-	}
-	h.hash0 = uint32(rand())
-
-	// Find the size parameter B which will hold the requested # of elements.
-	// For hint < 0 overLoadFactor returns false since hint < bucketCnt.
-	B := uint8(0)
-	for overLoadFactor(hint, B) {
-		B++
-	}
-	h.B = B
-
-	// allocate initial hash table
-	// if B == 0, the buckets field is allocated lazily later (in mapassign)
-	// If hint is large zeroing this memory could take a while.
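-	// For example, a hint of 100 settles on B == 4: overLoadFactor is
-	// true for B == 3 (100 > 13*(8/2) == 52) but false for B == 4
-	// (100 <= 13*(16/2) == 104).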
-	if h.B != 0 {
-		var nextOverflow *bmap
-		h.buckets, nextOverflow = makeBucketArray(t, h.B, nil)
-		if nextOverflow != nil {
-			h.extra = new(mapextra)
-			h.extra.nextOverflow = nextOverflow
-		}
-	}
-
-	return h
-}
-
-// makeBucketArray initializes a backing array for map buckets.
-// 1<<b is the minimum number of buckets to allocate.
-// dirtyalloc should either be nil or a bucket array previously
-// allocated by makeBucketArray with the same t and b parameters.
-// If dirtyalloc is nil a new backing array will be alloced and
-// otherwise dirtyalloc will be cleared and reused as backing array.
-func makeBucketArray(t *maptype, b uint8, dirtyalloc unsafe.Pointer) (buckets unsafe.Pointer, nextOverflow *bmap) {
-	base := bucketShift(b)
-	nbuckets := base
-	// For small b, overflow buckets are unlikely.
-	// Avoid the overhead of the calculation.
-	if b >= 4 {
-		// Add on the estimated number of overflow buckets
-		// required to insert the median number of elements
-		// used with this value of b.
-		nbuckets += bucketShift(b - 4)
-		sz := t.Bucket.Size_ * nbuckets
-		up := roundupsize(sz, !t.Bucket.Pointers())
-		if up != sz {
-			nbuckets = up / t.Bucket.Size_
-		}
-	}
-
-	if dirtyalloc == nil {
-		buckets = newarray(t.Bucket, int(nbuckets))
-	} else {
-		// dirtyalloc was previously generated by
-		// the above newarray(t.Bucket, int(nbuckets))
-		// but may not be empty.
-		buckets = dirtyalloc
-		size := t.Bucket.Size_ * nbuckets
-		if t.Bucket.Pointers() {
-			memclrHasPointers(buckets, size)
-		} else {
-			memclrNoHeapPointers(buckets, size)
-		}
-	}
-
-	if base != nbuckets {
-		// We preallocated some overflow buckets.
-		// To keep the overhead of tracking these overflow buckets to a minimum,
-		// we use the convention that if a preallocated overflow bucket's overflow
-		// pointer is nil, then there are more available by bumping the pointer.
-		// We need a safe non-nil pointer for the last overflow bucket; just use buckets.
-		nextOverflow = (*bmap)(add(buckets, base*uintptr(t.BucketSize)))
-		last := (*bmap)(add(buckets, (nbuckets-1)*uintptr(t.BucketSize)))
-		last.setoverflow(t, (*bmap)(buckets))
-	}
-	return buckets, nextOverflow
-}
-
-// mapaccess1 returns a pointer to h[key]. Never returns nil, instead
-// it will return a reference to the zero object for the elem type if
-// the key is not in the map.
-// NOTE: The returned pointer may keep the whole map live, so don't
-// hold onto it for very long.
-func mapaccess1(t *maptype, h *hmap, key unsafe.Pointer) unsafe.Pointer {
-	if raceenabled && h != nil {
-		callerpc := sys.GetCallerPC()
-		pc := abi.FuncPCABIInternal(mapaccess1)
-		racereadpc(unsafe.Pointer(h), callerpc, pc)
-		raceReadObjectPC(t.Key, key, callerpc, pc)
-	}
-	if msanenabled && h != nil {
-		msanread(key, t.Key.Size_)
-	}
-	if asanenabled && h != nil {
-		asanread(key, t.Key.Size_)
-	}
-	if h == nil || h.count == 0 {
-		if err := maps.OldMapKeyError(t, key); err != nil {
-			panic(err) // see issue 23734
-		}
-		return unsafe.Pointer(&zeroVal[0])
-	}
-	if h.flags&hashWriting != 0 {
-		fatal("concurrent map read and map write")
-	}
-	hash := t.Hasher(key, uintptr(h.hash0))
-	m := bucketMask(h.B)
-	b := (*bmap)(add(h.buckets, (hash&m)*uintptr(t.BucketSize)))
-	if c := h.oldbuckets; c != nil {
-		if !h.sameSizeGrow() {
-			// There used to be half as many buckets; mask down one more power of two.
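-			// For example, if h.B == 5 mid-grow, m is 31 but the old
-			// array has only 16 buckets, so the hash is masked with 15.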
- m >>= 1 - } - oldb := (*bmap)(add(c, (hash&m)*uintptr(t.BucketSize))) - if !evacuated(oldb) { - b = oldb - } - } - top := tophash(hash) -bucketloop: - for ; b != nil; b = b.overflow(t) { - for i := uintptr(0); i < abi.OldMapBucketCount; i++ { - if b.tophash[i] != top { - if b.tophash[i] == emptyRest { - break bucketloop - } - continue - } - k := add(unsafe.Pointer(b), dataOffset+i*uintptr(t.KeySize)) - if t.IndirectKey() { - k = *((*unsafe.Pointer)(k)) - } - if t.Key.Equal(key, k) { - e := add(unsafe.Pointer(b), dataOffset+abi.OldMapBucketCount*uintptr(t.KeySize)+i*uintptr(t.ValueSize)) - if t.IndirectElem() { - e = *((*unsafe.Pointer)(e)) - } - return e - } - } - } - return unsafe.Pointer(&zeroVal[0]) -} - -// mapaccess2 should be an internal detail, -// but widely used packages access it using linkname. -// Notable members of the hall of shame include: -// - github.com/ugorji/go/codec -// -// Do not remove or change the type signature. -// See go.dev/issue/67401. -// -//go:linkname mapaccess2 -func mapaccess2(t *maptype, h *hmap, key unsafe.Pointer) (unsafe.Pointer, bool) { - if raceenabled && h != nil { - callerpc := sys.GetCallerPC() - pc := abi.FuncPCABIInternal(mapaccess2) - racereadpc(unsafe.Pointer(h), callerpc, pc) - raceReadObjectPC(t.Key, key, callerpc, pc) - } - if msanenabled && h != nil { - msanread(key, t.Key.Size_) - } - if asanenabled && h != nil { - asanread(key, t.Key.Size_) - } - if h == nil || h.count == 0 { - if err := maps.OldMapKeyError(t, key); err != nil { - panic(err) // see issue 23734 - } - return unsafe.Pointer(&zeroVal[0]), false - } - if h.flags&hashWriting != 0 { - fatal("concurrent map read and map write") - } - hash := t.Hasher(key, uintptr(h.hash0)) - m := bucketMask(h.B) - b := (*bmap)(add(h.buckets, (hash&m)*uintptr(t.BucketSize))) - if c := h.oldbuckets; c != nil { - if !h.sameSizeGrow() { - // There used to be half as many buckets; mask down one more power of two. - m >>= 1 - } - oldb := (*bmap)(add(c, (hash&m)*uintptr(t.BucketSize))) - if !evacuated(oldb) { - b = oldb - } - } - top := tophash(hash) -bucketloop: - for ; b != nil; b = b.overflow(t) { - for i := uintptr(0); i < abi.OldMapBucketCount; i++ { - if b.tophash[i] != top { - if b.tophash[i] == emptyRest { - break bucketloop - } - continue - } - k := add(unsafe.Pointer(b), dataOffset+i*uintptr(t.KeySize)) - if t.IndirectKey() { - k = *((*unsafe.Pointer)(k)) - } - if t.Key.Equal(key, k) { - e := add(unsafe.Pointer(b), dataOffset+abi.OldMapBucketCount*uintptr(t.KeySize)+i*uintptr(t.ValueSize)) - if t.IndirectElem() { - e = *((*unsafe.Pointer)(e)) - } - return e, true - } - } - } - return unsafe.Pointer(&zeroVal[0]), false -} - -// returns both key and elem. Used by map iterator. -func mapaccessK(t *maptype, h *hmap, key unsafe.Pointer) (unsafe.Pointer, unsafe.Pointer) { - if h == nil || h.count == 0 { - return nil, nil - } - hash := t.Hasher(key, uintptr(h.hash0)) - m := bucketMask(h.B) - b := (*bmap)(add(h.buckets, (hash&m)*uintptr(t.BucketSize))) - if c := h.oldbuckets; c != nil { - if !h.sameSizeGrow() { - // There used to be half as many buckets; mask down one more power of two. 
- m >>= 1 - } - oldb := (*bmap)(add(c, (hash&m)*uintptr(t.BucketSize))) - if !evacuated(oldb) { - b = oldb - } - } - top := tophash(hash) -bucketloop: - for ; b != nil; b = b.overflow(t) { - for i := uintptr(0); i < abi.OldMapBucketCount; i++ { - if b.tophash[i] != top { - if b.tophash[i] == emptyRest { - break bucketloop - } - continue - } - k := add(unsafe.Pointer(b), dataOffset+i*uintptr(t.KeySize)) - if t.IndirectKey() { - k = *((*unsafe.Pointer)(k)) - } - if t.Key.Equal(key, k) { - e := add(unsafe.Pointer(b), dataOffset+abi.OldMapBucketCount*uintptr(t.KeySize)+i*uintptr(t.ValueSize)) - if t.IndirectElem() { - e = *((*unsafe.Pointer)(e)) - } - return k, e - } - } - } - return nil, nil -} - -func mapaccess1_fat(t *maptype, h *hmap, key, zero unsafe.Pointer) unsafe.Pointer { - e := mapaccess1(t, h, key) - if e == unsafe.Pointer(&zeroVal[0]) { - return zero - } - return e -} - -func mapaccess2_fat(t *maptype, h *hmap, key, zero unsafe.Pointer) (unsafe.Pointer, bool) { - e := mapaccess1(t, h, key) - if e == unsafe.Pointer(&zeroVal[0]) { - return zero, false - } - return e, true -} - -// Like mapaccess, but allocates a slot for the key if it is not present in the map. -// -// mapassign should be an internal detail, -// but widely used packages access it using linkname. -// Notable members of the hall of shame include: -// - github.com/bytedance/sonic -// - github.com/RomiChan/protobuf -// - github.com/segmentio/encoding -// - github.com/ugorji/go/codec -// -// Do not remove or change the type signature. -// See go.dev/issue/67401. -// -//go:linkname mapassign -func mapassign(t *maptype, h *hmap, key unsafe.Pointer) unsafe.Pointer { - if h == nil { - panic(plainError("assignment to entry in nil map")) - } - if raceenabled { - callerpc := sys.GetCallerPC() - pc := abi.FuncPCABIInternal(mapassign) - racewritepc(unsafe.Pointer(h), callerpc, pc) - raceReadObjectPC(t.Key, key, callerpc, pc) - } - if msanenabled { - msanread(key, t.Key.Size_) - } - if asanenabled { - asanread(key, t.Key.Size_) - } - if h.flags&hashWriting != 0 { - fatal("concurrent map writes") - } - hash := t.Hasher(key, uintptr(h.hash0)) - - // Set hashWriting after calling t.hasher, since t.hasher may panic, - // in which case we have not actually done a write. - h.flags ^= hashWriting - - if h.buckets == nil { - h.buckets = newobject(t.Bucket) // newarray(t.Bucket, 1) - } - -again: - bucket := hash & bucketMask(h.B) - if h.growing() { - growWork(t, h, bucket) - } - b := (*bmap)(add(h.buckets, bucket*uintptr(t.BucketSize))) - top := tophash(hash) - - var inserti *uint8 - var insertk unsafe.Pointer - var elem unsafe.Pointer -bucketloop: - for { - for i := uintptr(0); i < abi.OldMapBucketCount; i++ { - if b.tophash[i] != top { - if isEmpty(b.tophash[i]) && inserti == nil { - inserti = &b.tophash[i] - insertk = add(unsafe.Pointer(b), dataOffset+i*uintptr(t.KeySize)) - elem = add(unsafe.Pointer(b), dataOffset+abi.OldMapBucketCount*uintptr(t.KeySize)+i*uintptr(t.ValueSize)) - } - if b.tophash[i] == emptyRest { - break bucketloop - } - continue - } - k := add(unsafe.Pointer(b), dataOffset+i*uintptr(t.KeySize)) - if t.IndirectKey() { - k = *((*unsafe.Pointer)(k)) - } - if !t.Key.Equal(key, k) { - continue - } - // already have a mapping for key. Update it. 
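-			// Keys that compare equal may still differ in representation
-			// (e.g. +0.0 vs -0.0 for floats), so some key types must have
-			// the stored key overwritten on update.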
- if t.NeedKeyUpdate() { - typedmemmove(t.Key, k, key) - } - elem = add(unsafe.Pointer(b), dataOffset+abi.OldMapBucketCount*uintptr(t.KeySize)+i*uintptr(t.ValueSize)) - goto done - } - ovf := b.overflow(t) - if ovf == nil { - break - } - b = ovf - } - - // Did not find mapping for key. Allocate new cell & add entry. - - // If we hit the max load factor or we have too many overflow buckets, - // and we're not already in the middle of growing, start growing. - if !h.growing() && (overLoadFactor(h.count+1, h.B) || tooManyOverflowBuckets(h.noverflow, h.B)) { - hashGrow(t, h) - goto again // Growing the table invalidates everything, so try again - } - - if inserti == nil { - // The current bucket and all the overflow buckets connected to it are full, allocate a new one. - newb := h.newoverflow(t, b) - inserti = &newb.tophash[0] - insertk = add(unsafe.Pointer(newb), dataOffset) - elem = add(insertk, abi.OldMapBucketCount*uintptr(t.KeySize)) - } - - // store new key/elem at insert position - if t.IndirectKey() { - kmem := newobject(t.Key) - *(*unsafe.Pointer)(insertk) = kmem - insertk = kmem - } - if t.IndirectElem() { - vmem := newobject(t.Elem) - *(*unsafe.Pointer)(elem) = vmem - } - typedmemmove(t.Key, insertk, key) - *inserti = top - h.count++ - -done: - if h.flags&hashWriting == 0 { - fatal("concurrent map writes") - } - h.flags &^= hashWriting - if t.IndirectElem() { - elem = *((*unsafe.Pointer)(elem)) - } - return elem -} - -// mapdelete should be an internal detail, -// but widely used packages access it using linkname. -// Notable members of the hall of shame include: -// - github.com/ugorji/go/codec -// -// Do not remove or change the type signature. -// See go.dev/issue/67401. -// -//go:linkname mapdelete -func mapdelete(t *maptype, h *hmap, key unsafe.Pointer) { - if raceenabled && h != nil { - callerpc := sys.GetCallerPC() - pc := abi.FuncPCABIInternal(mapdelete) - racewritepc(unsafe.Pointer(h), callerpc, pc) - raceReadObjectPC(t.Key, key, callerpc, pc) - } - if msanenabled && h != nil { - msanread(key, t.Key.Size_) - } - if asanenabled && h != nil { - asanread(key, t.Key.Size_) - } - if h == nil || h.count == 0 { - if err := maps.OldMapKeyError(t, key); err != nil { - panic(err) // see issue 23734 - } - return - } - if h.flags&hashWriting != 0 { - fatal("concurrent map writes") - } - - hash := t.Hasher(key, uintptr(h.hash0)) - - // Set hashWriting after calling t.hasher, since t.hasher may panic, - // in which case we have not actually done a write (delete). - h.flags ^= hashWriting - - bucket := hash & bucketMask(h.B) - if h.growing() { - growWork(t, h, bucket) - } - b := (*bmap)(add(h.buckets, bucket*uintptr(t.BucketSize))) - bOrig := b - top := tophash(hash) -search: - for ; b != nil; b = b.overflow(t) { - for i := uintptr(0); i < abi.OldMapBucketCount; i++ { - if b.tophash[i] != top { - if b.tophash[i] == emptyRest { - break search - } - continue - } - k := add(unsafe.Pointer(b), dataOffset+i*uintptr(t.KeySize)) - k2 := k - if t.IndirectKey() { - k2 = *((*unsafe.Pointer)(k2)) - } - if !t.Key.Equal(key, k2) { - continue - } - // Only clear key if there are pointers in it. 
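-			// An indirect key only needs its pointer slot nilled out; an
-			// inline key is cleared only when it holds pointers the GC
-			// would otherwise keep tracing.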
- if t.IndirectKey() { - *(*unsafe.Pointer)(k) = nil - } else if t.Key.Pointers() { - memclrHasPointers(k, t.Key.Size_) - } - e := add(unsafe.Pointer(b), dataOffset+abi.OldMapBucketCount*uintptr(t.KeySize)+i*uintptr(t.ValueSize)) - if t.IndirectElem() { - *(*unsafe.Pointer)(e) = nil - } else if t.Elem.Pointers() { - memclrHasPointers(e, t.Elem.Size_) - } else { - memclrNoHeapPointers(e, t.Elem.Size_) - } - b.tophash[i] = emptyOne - // If the bucket now ends in a bunch of emptyOne states, - // change those to emptyRest states. - // It would be nice to make this a separate function, but - // for loops are not currently inlineable. - if i == abi.OldMapBucketCount-1 { - if b.overflow(t) != nil && b.overflow(t).tophash[0] != emptyRest { - goto notLast - } - } else { - if b.tophash[i+1] != emptyRest { - goto notLast - } - } - for { - b.tophash[i] = emptyRest - if i == 0 { - if b == bOrig { - break // beginning of initial bucket, we're done. - } - // Find previous bucket, continue at its last entry. - c := b - for b = bOrig; b.overflow(t) != c; b = b.overflow(t) { - } - i = abi.OldMapBucketCount - 1 - } else { - i-- - } - if b.tophash[i] != emptyOne { - break - } - } - notLast: - h.count-- - // Reset the hash seed to make it more difficult for attackers to - // repeatedly trigger hash collisions. See issue 25237. - if h.count == 0 { - h.hash0 = uint32(rand()) - } - break search - } - } - - if h.flags&hashWriting == 0 { - fatal("concurrent map writes") - } - h.flags &^= hashWriting -} - -// mapiterinit initializes the hiter struct used for ranging over maps. -// The hiter struct pointed to by 'it' is allocated on the stack -// by the compilers order pass or on the heap by reflect_mapiterinit. -// Both need to have zeroed hiter since the struct contains pointers. -// -// mapiterinit should be an internal detail, -// but widely used packages access it using linkname. -// Notable members of the hall of shame include: -// - github.com/bytedance/sonic -// - github.com/goccy/go-json -// - github.com/RomiChan/protobuf -// - github.com/segmentio/encoding -// - github.com/ugorji/go/codec -// - github.com/wI2L/jettison -// -// Do not remove or change the type signature. -// See go.dev/issue/67401. -// -//go:linkname mapiterinit -func mapiterinit(t *maptype, h *hmap, it *hiter) { - if raceenabled && h != nil { - callerpc := sys.GetCallerPC() - racereadpc(unsafe.Pointer(h), callerpc, abi.FuncPCABIInternal(mapiterinit)) - } - - it.t = t - if h == nil || h.count == 0 { - return - } - - if unsafe.Sizeof(hiter{}) != 8+12*goarch.PtrSize { - throw("hash_iter size incorrect") // see cmd/compile/internal/reflectdata/reflect.go - } - it.h = h - it.clearSeq = h.clearSeq - - // grab snapshot of bucket state - it.B = h.B - it.buckets = h.buckets - if !t.Bucket.Pointers() { - // Allocate the current slice and remember pointers to both current and old. - // This preserves all relevant overflow buckets alive even if - // the table grows and/or overflow buckets are added to the table - // while we are iterating. - h.createOverflow() - it.overflow = h.extra.overflow - it.oldoverflow = h.extra.oldoverflow - } - - // decide where to start - r := uintptr(rand()) - it.startBucket = r & bucketMask(h.B) - it.offset = uint8(r >> h.B & (abi.OldMapBucketCount - 1)) - - // iterator state - it.bucket = it.startBucket - - // Remember we have an iterator. - // Can run concurrently with another mapiterinit(). 
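-	// The preliminary non-atomic check just avoids the atomic.Or8 when
-	// both flag bits are already set.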
- if old := h.flags; old&(iterator|oldIterator) != iterator|oldIterator { - atomic.Or8(&h.flags, iterator|oldIterator) - } - - mapiternext(it) -} - -// mapiternext should be an internal detail, -// but widely used packages access it using linkname. -// Notable members of the hall of shame include: -// - github.com/bytedance/sonic -// - github.com/RomiChan/protobuf -// - github.com/segmentio/encoding -// - github.com/ugorji/go/codec -// - gonum.org/v1/gonum -// -// Do not remove or change the type signature. -// See go.dev/issue/67401. -// -//go:linkname mapiternext -func mapiternext(it *hiter) { - h := it.h - if raceenabled { - callerpc := sys.GetCallerPC() - racereadpc(unsafe.Pointer(h), callerpc, abi.FuncPCABIInternal(mapiternext)) - } - if h.flags&hashWriting != 0 { - fatal("concurrent map iteration and map write") - } - t := it.t - bucket := it.bucket - b := it.bptr - i := it.i - checkBucket := it.checkBucket - -next: - if b == nil { - if bucket == it.startBucket && it.wrapped { - // end of iteration - it.key = nil - it.elem = nil - return - } - if h.growing() && it.B == h.B { - // Iterator was started in the middle of a grow, and the grow isn't done yet. - // If the bucket we're looking at hasn't been filled in yet (i.e. the old - // bucket hasn't been evacuated) then we need to iterate through the old - // bucket and only return the ones that will be migrated to this bucket. - oldbucket := bucket & it.h.oldbucketmask() - b = (*bmap)(add(h.oldbuckets, oldbucket*uintptr(t.BucketSize))) - if !evacuated(b) { - checkBucket = bucket - } else { - b = (*bmap)(add(it.buckets, bucket*uintptr(t.BucketSize))) - checkBucket = noCheck - } - } else { - b = (*bmap)(add(it.buckets, bucket*uintptr(t.BucketSize))) - checkBucket = noCheck - } - bucket++ - if bucket == bucketShift(it.B) { - bucket = 0 - it.wrapped = true - } - i = 0 - } - for ; i < abi.OldMapBucketCount; i++ { - offi := (i + it.offset) & (abi.OldMapBucketCount - 1) - if isEmpty(b.tophash[offi]) || b.tophash[offi] == evacuatedEmpty { - // TODO: emptyRest is hard to use here, as we start iterating - // in the middle of a bucket. It's feasible, just tricky. - continue - } - k := add(unsafe.Pointer(b), dataOffset+uintptr(offi)*uintptr(t.KeySize)) - if t.IndirectKey() { - k = *((*unsafe.Pointer)(k)) - } - e := add(unsafe.Pointer(b), dataOffset+abi.OldMapBucketCount*uintptr(t.KeySize)+uintptr(offi)*uintptr(t.ValueSize)) - if checkBucket != noCheck && !h.sameSizeGrow() { - // Special case: iterator was started during a grow to a larger size - // and the grow is not done yet. We're working on a bucket whose - // oldbucket has not been evacuated yet. Or at least, it wasn't - // evacuated when we started the bucket. So we're iterating - // through the oldbucket, skipping any keys that will go - // to the other new bucket (each oldbucket expands to two - // buckets during a grow). - if t.ReflexiveKey() || t.Key.Equal(k, k) { - // If the item in the oldbucket is not destined for - // the current new bucket in the iteration, skip it. - hash := t.Hasher(k, uintptr(h.hash0)) - if hash&bucketMask(it.B) != checkBucket { - continue - } - } else { - // Hash isn't repeatable if k != k (NaNs). We need a - // repeatable and randomish choice of which direction - // to send NaNs during evacuation. We'll use the low - // bit of tophash to decide which way NaNs go. - // NOTE: this case is why we need two evacuate tophash - // values, evacuatedX and evacuatedY, that differ in - // their low bit. 
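-				// checkBucket>>(it.B-1) is the bit that selects between
-				// the two new buckets this oldbucket splits into; it must
-				// match tophash's low bit, the direction the NaN key is
-				// sent during evacuation.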
- if checkBucket>>(it.B-1) != uintptr(b.tophash[offi]&1) { - continue - } - } - } - if it.clearSeq == h.clearSeq && - ((b.tophash[offi] != evacuatedX && b.tophash[offi] != evacuatedY) || - !(t.ReflexiveKey() || t.Key.Equal(k, k))) { - // This is the golden data, we can return it. - // OR - // key!=key, so the entry can't be deleted or updated, so we can just return it. - // That's lucky for us because when key!=key we can't look it up successfully. - it.key = k - if t.IndirectElem() { - e = *((*unsafe.Pointer)(e)) - } - it.elem = e - } else { - // The hash table has grown since the iterator was started. - // The golden data for this key is now somewhere else. - // Check the current hash table for the data. - // This code handles the case where the key - // has been deleted, updated, or deleted and reinserted. - // NOTE: we need to regrab the key as it has potentially been - // updated to an equal() but not identical key (e.g. +0.0 vs -0.0). - rk, re := mapaccessK(t, h, k) - if rk == nil { - continue // key has been deleted - } - it.key = rk - it.elem = re - } - it.bucket = bucket - if it.bptr != b { // avoid unnecessary write barrier; see issue 14921 - it.bptr = b - } - it.i = i + 1 - it.checkBucket = checkBucket - return - } - b = b.overflow(t) - i = 0 - goto next -} - -// mapclear deletes all keys from a map. -// It is called by the compiler. -func mapclear(t *maptype, h *hmap) { - if raceenabled && h != nil { - callerpc := sys.GetCallerPC() - pc := abi.FuncPCABIInternal(mapclear) - racewritepc(unsafe.Pointer(h), callerpc, pc) - } - - if h == nil || h.count == 0 { - return - } - - if h.flags&hashWriting != 0 { - fatal("concurrent map writes") - } - - h.flags ^= hashWriting - h.flags &^= sameSizeGrow - h.oldbuckets = nil - h.nevacuate = 0 - h.noverflow = 0 - h.count = 0 - h.clearSeq++ - - // Reset the hash seed to make it more difficult for attackers to - // repeatedly trigger hash collisions. See issue 25237. - h.hash0 = uint32(rand()) - - // Keep the mapextra allocation but clear any extra information. - if h.extra != nil { - *h.extra = mapextra{} - } - - // makeBucketArray clears the memory pointed to by h.buckets - // and recovers any overflow buckets by generating them - // as if h.buckets was newly alloced. - _, nextOverflow := makeBucketArray(t, h.B, h.buckets) - if nextOverflow != nil { - // If overflow buckets are created then h.extra - // will have been allocated during initial bucket creation. - h.extra.nextOverflow = nextOverflow - } - - if h.flags&hashWriting == 0 { - fatal("concurrent map writes") - } - h.flags &^= hashWriting -} - -func hashGrow(t *maptype, h *hmap) { - // If we've hit the load factor, get bigger. - // Otherwise, there are too many overflow buckets, - // so keep the same number of buckets and "grow" laterally. - bigger := uint8(1) - if !overLoadFactor(h.count+1, h.B) { - bigger = 0 - h.flags |= sameSizeGrow - } - oldbuckets := h.buckets - newbuckets, nextOverflow := makeBucketArray(t, h.B+bigger, nil) - - flags := h.flags &^ (iterator | oldIterator) - if h.flags&iterator != 0 { - flags |= oldIterator - } - // commit the grow (atomic wrt gc) - h.B += bigger - h.flags = flags - h.oldbuckets = oldbuckets - h.buckets = newbuckets - h.nevacuate = 0 - h.noverflow = 0 - - if h.extra != nil && h.extra.overflow != nil { - // Promote current overflow buckets to the old generation. 
-		if h.extra.oldoverflow != nil {
-			throw("oldoverflow is not nil")
-		}
-		h.extra.oldoverflow = h.extra.overflow
-		h.extra.overflow = nil
-	}
-	if nextOverflow != nil {
-		if h.extra == nil {
-			h.extra = new(mapextra)
-		}
-		h.extra.nextOverflow = nextOverflow
-	}
-
-	// the actual copying of the hash table data is done incrementally
-	// by growWork() and evacuate().
-}
-
-// overLoadFactor reports whether count items placed in 1<<B buckets is over loadFactor.
-func overLoadFactor(count int, B uint8) bool {
-	return count > abi.OldMapBucketCount && uintptr(count) > loadFactorNum*(bucketShift(B)/loadFactorDen)
-}
-
-// tooManyOverflowBuckets reports whether noverflow buckets is too many for a map with 1<<B buckets.
-// Note that most of these overflow buckets must be in sparse use;
-// if use was dense, then we'd have already triggered regular map growth.
-func tooManyOverflowBuckets(noverflow uint16, B uint8) bool {
-	// If the threshold is too low, we do extraneous work.
-	// If the threshold is too high, maps that grow and shrink can hold on to lots of unused memory.
-	// "too many" means (approximately) as many overflow buckets as regular buckets.
-	// See incrnoverflow for more details.
-	if B > 15 {
-		B = 15
-	}
-	// The compiler doesn't see here that B < 16; mask B to generate shorter shift code.
-	return noverflow >= uint16(1)<<(B&15)
-}
-
-// growing reports whether h is growing. The growth may be to the same size or bigger.
-func (h *hmap) growing() bool {
-	return h.oldbuckets != nil
-}
-
-// sameSizeGrow reports whether the current growth is to a map of the same size.
-func (h *hmap) sameSizeGrow() bool {
-	return h.flags&sameSizeGrow != 0
-}
-
-//go:linkname sameSizeGrowForIssue69110Test
-func sameSizeGrowForIssue69110Test(h *hmap) bool {
-	return h.sameSizeGrow()
-}
-
-// noldbuckets calculates the number of buckets prior to the current map growth.
-func (h *hmap) noldbuckets() uintptr {
-	oldB := h.B
-	if !h.sameSizeGrow() {
-		oldB--
-	}
-	return bucketShift(oldB)
-}
-
-// oldbucketmask provides a mask that can be applied to calculate n % noldbuckets().
-func (h *hmap) oldbucketmask() uintptr {
-	return h.noldbuckets() - 1
-}
-
-func growWork(t *maptype, h *hmap, bucket uintptr) {
-	// make sure we evacuate the oldbucket corresponding
-	// to the bucket we're about to use
-	evacuate(t, h, bucket&h.oldbucketmask())
-
-	// evacuate one more oldbucket to make progress on growing
-	if h.growing() {
-		evacuate(t, h, h.nevacuate)
-	}
-}
-
-func bucketEvacuated(t *maptype, h *hmap, bucket uintptr) bool {
-	b := (*bmap)(add(h.oldbuckets, bucket*uintptr(t.BucketSize)))
-	return evacuated(b)
-}
-
-// evacDst is an evacuation destination.
-type evacDst struct {
-	b *bmap          // current destination bucket
-	i int            // key/elem index into b
-	k unsafe.Pointer // pointer to current key storage
-	e unsafe.Pointer // pointer to current elem storage
-}
-
-func evacuate(t *maptype, h *hmap, oldbucket uintptr) {
-	b := (*bmap)(add(h.oldbuckets, oldbucket*uintptr(t.BucketSize)))
-	newbit := h.noldbuckets()
-	if !evacuated(b) {
-		// TODO: reuse overflow buckets instead of using new ones, if there
-		// is no iterator using the old buckets. (If !oldIterator.)
-
-		// xy contains the x and y (low and high) evacuation destinations.
-		var xy [2]evacDst
-		x := &xy[0]
-		x.b = (*bmap)(add(h.buckets, oldbucket*uintptr(t.BucketSize)))
-		x.k = add(unsafe.Pointer(x.b), dataOffset)
-		x.e = add(x.k, abi.OldMapBucketCount*uintptr(t.KeySize))
-
-		if !h.sameSizeGrow() {
-			// Only calculate y pointers if we're growing bigger.
-			// Otherwise GC can see bad pointers.
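-			// The y half lives newbit buckets above x in the doubled table.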
- y := &xy[1] - y.b = (*bmap)(add(h.buckets, (oldbucket+newbit)*uintptr(t.BucketSize))) - y.k = add(unsafe.Pointer(y.b), dataOffset) - y.e = add(y.k, abi.OldMapBucketCount*uintptr(t.KeySize)) - } - - for ; b != nil; b = b.overflow(t) { - k := add(unsafe.Pointer(b), dataOffset) - e := add(k, abi.OldMapBucketCount*uintptr(t.KeySize)) - for i := 0; i < abi.OldMapBucketCount; i, k, e = i+1, add(k, uintptr(t.KeySize)), add(e, uintptr(t.ValueSize)) { - top := b.tophash[i] - if isEmpty(top) { - b.tophash[i] = evacuatedEmpty - continue - } - if top < minTopHash { - throw("bad map state") - } - k2 := k - if t.IndirectKey() { - k2 = *((*unsafe.Pointer)(k2)) - } - var useY uint8 - if !h.sameSizeGrow() { - // Compute hash to make our evacuation decision (whether we need - // to send this key/elem to bucket x or bucket y). - hash := t.Hasher(k2, uintptr(h.hash0)) - if h.flags&iterator != 0 && !t.ReflexiveKey() && !t.Key.Equal(k2, k2) { - // If key != key (NaNs), then the hash could be (and probably - // will be) entirely different from the old hash. Moreover, - // it isn't reproducible. Reproducibility is required in the - // presence of iterators, as our evacuation decision must - // match whatever decision the iterator made. - // Fortunately, we have the freedom to send these keys either - // way. Also, tophash is meaningless for these kinds of keys. - // We let the low bit of tophash drive the evacuation decision. - // We recompute a new random tophash for the next level so - // these keys will get evenly distributed across all buckets - // after multiple grows. - useY = top & 1 - top = tophash(hash) - } else { - if hash&newbit != 0 { - useY = 1 - } - } - } - - if evacuatedX+1 != evacuatedY || evacuatedX^1 != evacuatedY { - throw("bad evacuatedN") - } - - b.tophash[i] = evacuatedX + useY // evacuatedX + 1 == evacuatedY - dst := &xy[useY] // evacuation destination - - if dst.i == abi.OldMapBucketCount { - dst.b = h.newoverflow(t, dst.b) - dst.i = 0 - dst.k = add(unsafe.Pointer(dst.b), dataOffset) - dst.e = add(dst.k, abi.OldMapBucketCount*uintptr(t.KeySize)) - } - dst.b.tophash[dst.i&(abi.OldMapBucketCount-1)] = top // mask dst.i as an optimization, to avoid a bounds check - if t.IndirectKey() { - *(*unsafe.Pointer)(dst.k) = k2 // copy pointer - } else { - typedmemmove(t.Key, dst.k, k) // copy elem - } - if t.IndirectElem() { - *(*unsafe.Pointer)(dst.e) = *(*unsafe.Pointer)(e) - } else { - typedmemmove(t.Elem, dst.e, e) - } - dst.i++ - // These updates might push these pointers past the end of the - // key or elem arrays. That's ok, as we have the overflow pointer - // at the end of the bucket to protect against pointing past the - // end of the bucket. - dst.k = add(dst.k, uintptr(t.KeySize)) - dst.e = add(dst.e, uintptr(t.ValueSize)) - } - } - // Unlink the overflow buckets & clear key/elem to help GC. - if h.flags&oldIterator == 0 && t.Bucket.Pointers() { - b := add(h.oldbuckets, oldbucket*uintptr(t.BucketSize)) - // Preserve b.tophash because the evacuation - // state is maintained there. - ptr := add(b, dataOffset) - n := uintptr(t.BucketSize) - dataOffset - memclrHasPointers(ptr, n) - } - } - - if oldbucket == h.nevacuate { - advanceEvacuationMark(h, t, newbit) - } -} - -func advanceEvacuationMark(h *hmap, t *maptype, newbit uintptr) { - h.nevacuate++ - // Experiments suggest that 1024 is overkill by at least an order of magnitude. - // Put it in there as a safeguard anyway, to ensure O(1) behavior. 
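-	// For example, even with 1<<20 oldbuckets, a single call advances
-	// the marker past at most 1024 already-evacuated buckets.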
- stop := h.nevacuate + 1024 - if stop > newbit { - stop = newbit - } - for h.nevacuate != stop && bucketEvacuated(t, h, h.nevacuate) { - h.nevacuate++ - } - if h.nevacuate == newbit { // newbit == # of oldbuckets - // Growing is all done. Free old main bucket array. - h.oldbuckets = nil - // Can discard old overflow buckets as well. - // If they are still referenced by an iterator, - // then the iterator holds a pointers to the slice. - if h.extra != nil { - h.extra.oldoverflow = nil - } - h.flags &^= sameSizeGrow - } -} - -// Reflect stubs. Called from ../reflect/asm_*.s - -// reflect_makemap is for package reflect, -// but widely used packages access it using linkname. -// Notable members of the hall of shame include: -// - gitee.com/quant1x/gox -// - github.com/modern-go/reflect2 -// - github.com/goccy/go-json -// - github.com/RomiChan/protobuf -// - github.com/segmentio/encoding -// - github.com/v2pro/plz -// -// Do not remove or change the type signature. -// See go.dev/issue/67401. -// -//go:linkname reflect_makemap reflect.makemap -func reflect_makemap(t *maptype, cap int) *hmap { - // Check invariants and reflects math. - if t.Key.Equal == nil { - throw("runtime.reflect_makemap: unsupported map key type") - } - if t.Key.Size_ > abi.OldMapMaxKeyBytes && (!t.IndirectKey() || t.KeySize != uint8(goarch.PtrSize)) || - t.Key.Size_ <= abi.OldMapMaxKeyBytes && (t.IndirectKey() || t.KeySize != uint8(t.Key.Size_)) { - throw("key size wrong") - } - if t.Elem.Size_ > abi.OldMapMaxElemBytes && (!t.IndirectElem() || t.ValueSize != uint8(goarch.PtrSize)) || - t.Elem.Size_ <= abi.OldMapMaxElemBytes && (t.IndirectElem() || t.ValueSize != uint8(t.Elem.Size_)) { - throw("elem size wrong") - } - if t.Key.Align_ > abi.OldMapBucketCount { - throw("key align too big") - } - if t.Elem.Align_ > abi.OldMapBucketCount { - throw("elem align too big") - } - if t.Key.Size_%uintptr(t.Key.Align_) != 0 { - throw("key size not a multiple of key align") - } - if t.Elem.Size_%uintptr(t.Elem.Align_) != 0 { - throw("elem size not a multiple of elem align") - } - if abi.OldMapBucketCount < 8 { - throw("bucketsize too small for proper alignment") - } - if dataOffset%uintptr(t.Key.Align_) != 0 { - throw("need padding in bucket (key)") - } - if dataOffset%uintptr(t.Elem.Align_) != 0 { - throw("need padding in bucket (elem)") - } - - return makemap(t, cap, nil) -} - -// reflect_mapaccess is for package reflect, -// but widely used packages access it using linkname. -// Notable members of the hall of shame include: -// - gitee.com/quant1x/gox -// - github.com/modern-go/reflect2 -// - github.com/v2pro/plz -// -// Do not remove or change the type signature. -// See go.dev/issue/67401. -// -//go:linkname reflect_mapaccess reflect.mapaccess -func reflect_mapaccess(t *maptype, h *hmap, key unsafe.Pointer) unsafe.Pointer { - elem, ok := mapaccess2(t, h, key) - if !ok { - // reflect wants nil for a missing element - elem = nil - } - return elem -} - -//go:linkname reflect_mapaccess_faststr reflect.mapaccess_faststr -func reflect_mapaccess_faststr(t *maptype, h *hmap, key string) unsafe.Pointer { - elem, ok := mapaccess2_faststr(t, h, key) - if !ok { - // reflect wants nil for a missing element - elem = nil - } - return elem -} - -// reflect_mapassign is for package reflect, -// but widely used packages access it using linkname. -// Notable members of the hall of shame include: -// - gitee.com/quant1x/gox -// - github.com/v2pro/plz -// -// Do not remove or change the type signature. 
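-// See go.dev/issue/67401.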
-// -//go:linkname reflect_mapassign reflect.mapassign0 -func reflect_mapassign(t *maptype, h *hmap, key unsafe.Pointer, elem unsafe.Pointer) { - p := mapassign(t, h, key) - typedmemmove(t.Elem, p, elem) -} - -//go:linkname reflect_mapassign_faststr reflect.mapassign_faststr0 -func reflect_mapassign_faststr(t *maptype, h *hmap, key string, elem unsafe.Pointer) { - p := mapassign_faststr(t, h, key) - typedmemmove(t.Elem, p, elem) -} - -//go:linkname reflect_mapdelete reflect.mapdelete -func reflect_mapdelete(t *maptype, h *hmap, key unsafe.Pointer) { - mapdelete(t, h, key) -} - -//go:linkname reflect_mapdelete_faststr reflect.mapdelete_faststr -func reflect_mapdelete_faststr(t *maptype, h *hmap, key string) { - mapdelete_faststr(t, h, key) -} - -// reflect_mapiterinit is for package reflect, -// but widely used packages access it using linkname. -// Notable members of the hall of shame include: -// - github.com/modern-go/reflect2 -// - gitee.com/quant1x/gox -// - github.com/v2pro/plz -// - github.com/wI2L/jettison -// -// Do not remove or change the type signature. -// See go.dev/issue/67401. -// -//go:linkname reflect_mapiterinit reflect.mapiterinit -func reflect_mapiterinit(t *maptype, h *hmap, it *hiter) { - mapiterinit(t, h, it) -} - -// reflect_mapiternext is for package reflect, -// but widely used packages access it using linkname. -// Notable members of the hall of shame include: -// - gitee.com/quant1x/gox -// - github.com/modern-go/reflect2 -// - github.com/goccy/go-json -// - github.com/v2pro/plz -// - github.com/wI2L/jettison -// -// Do not remove or change the type signature. -// See go.dev/issue/67401. -// -//go:linkname reflect_mapiternext reflect.mapiternext -func reflect_mapiternext(it *hiter) { - mapiternext(it) -} - -// reflect_mapiterkey was for package reflect, -// but widely used packages access it using linkname. -// Notable members of the hall of shame include: -// - github.com/goccy/go-json -// - gonum.org/v1/gonum -// -// Do not remove or change the type signature. -// See go.dev/issue/67401. -// -//go:linkname reflect_mapiterkey reflect.mapiterkey -func reflect_mapiterkey(it *hiter) unsafe.Pointer { - return it.key -} - -// reflect_mapiterelem was for package reflect, -// but widely used packages access it using linkname. -// Notable members of the hall of shame include: -// - github.com/goccy/go-json -// - gonum.org/v1/gonum -// -// Do not remove or change the type signature. -// See go.dev/issue/67401. -// -//go:linkname reflect_mapiterelem reflect.mapiterelem -func reflect_mapiterelem(it *hiter) unsafe.Pointer { - return it.elem -} - -// reflect_maplen is for package reflect, -// but widely used packages access it using linkname. -// Notable members of the hall of shame include: -// - github.com/goccy/go-json -// - github.com/wI2L/jettison -// -// Do not remove or change the type signature. -// See go.dev/issue/67401. 
-// -//go:linkname reflect_maplen reflect.maplen -func reflect_maplen(h *hmap) int { - if h == nil { - return 0 - } - if raceenabled { - callerpc := sys.GetCallerPC() - racereadpc(unsafe.Pointer(h), callerpc, abi.FuncPCABIInternal(reflect_maplen)) - } - return h.count -} - -//go:linkname reflect_mapclear reflect.mapclear -func reflect_mapclear(t *maptype, h *hmap) { - mapclear(t, h) -} - -//go:linkname reflectlite_maplen internal/reflectlite.maplen -func reflectlite_maplen(h *hmap) int { - if h == nil { - return 0 - } - if raceenabled { - callerpc := sys.GetCallerPC() - racereadpc(unsafe.Pointer(h), callerpc, abi.FuncPCABIInternal(reflect_maplen)) - } - return h.count -} - -// mapinitnoop is a no-op function known the Go linker; if a given global -// map (of the right size) is determined to be dead, the linker will -// rewrite the relocation (from the package init func) from the outlined -// map init function to this symbol. Defined in assembly so as to avoid -// complications with instrumentation (coverage, etc). -func mapinitnoop() - -// mapclone for implementing maps.Clone -// -//go:linkname mapclone maps.clone -func mapclone(m any) any { - e := efaceOf(&m) - e.data = unsafe.Pointer(mapclone2((*maptype)(unsafe.Pointer(e._type)), (*hmap)(e.data))) - return m -} - -// moveToBmap moves a bucket from src to dst. It returns the destination bucket or new destination bucket if it overflows -// and the pos that the next key/value will be written, if pos == bucketCnt means needs to written in overflow bucket. -func moveToBmap(t *maptype, h *hmap, dst *bmap, pos int, src *bmap) (*bmap, int) { - for i := 0; i < abi.OldMapBucketCount; i++ { - if isEmpty(src.tophash[i]) { - continue - } - - for ; pos < abi.OldMapBucketCount; pos++ { - if isEmpty(dst.tophash[pos]) { - break - } - } - - if pos == abi.OldMapBucketCount { - dst = h.newoverflow(t, dst) - pos = 0 - } - - srcK := add(unsafe.Pointer(src), dataOffset+uintptr(i)*uintptr(t.KeySize)) - srcEle := add(unsafe.Pointer(src), dataOffset+abi.OldMapBucketCount*uintptr(t.KeySize)+uintptr(i)*uintptr(t.ValueSize)) - dstK := add(unsafe.Pointer(dst), dataOffset+uintptr(pos)*uintptr(t.KeySize)) - dstEle := add(unsafe.Pointer(dst), dataOffset+abi.OldMapBucketCount*uintptr(t.KeySize)+uintptr(pos)*uintptr(t.ValueSize)) - - dst.tophash[pos] = src.tophash[i] - if t.IndirectKey() { - srcK = *(*unsafe.Pointer)(srcK) - if t.NeedKeyUpdate() { - kStore := newobject(t.Key) - typedmemmove(t.Key, kStore, srcK) - srcK = kStore - } - // Note: if NeedKeyUpdate is false, then the memory - // used to store the key is immutable, so we can share - // it between the original map and its clone. - *(*unsafe.Pointer)(dstK) = srcK - } else { - typedmemmove(t.Key, dstK, srcK) - } - if t.IndirectElem() { - srcEle = *(*unsafe.Pointer)(srcEle) - eStore := newobject(t.Elem) - typedmemmove(t.Elem, eStore, srcEle) - *(*unsafe.Pointer)(dstEle) = eStore - } else { - typedmemmove(t.Elem, dstEle, srcEle) - } - pos++ - h.count++ - } - return dst, pos -} - -func mapclone2(t *maptype, src *hmap) *hmap { - hint := src.count - if overLoadFactor(hint, src.B) { - // Note: in rare cases (e.g. during a same-sized grow) the map - // can be overloaded. Make sure we don't allocate a destination - // bucket array larger than the source bucket array. - // This will cause the cloned map to be overloaded also, - // but that's better than crashing. See issue 69110. 
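-		// For example, with src.B == 2 the hint is capped at
-		// 13*(4/2) == 26 entries.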
- hint = int(loadFactorNum * (bucketShift(src.B) / loadFactorDen)) - } - dst := makemap(t, hint, nil) - dst.hash0 = src.hash0 - dst.nevacuate = 0 - // flags do not need to be copied here, just like a new map has no flags. - - if src.count == 0 { - return dst - } - - if src.flags&hashWriting != 0 { - fatal("concurrent map clone and map write") - } - - if src.B == 0 && !(t.IndirectKey() && t.NeedKeyUpdate()) && !t.IndirectElem() { - // Quick copy for small maps. - dst.buckets = newobject(t.Bucket) - dst.count = src.count - typedmemmove(t.Bucket, dst.buckets, src.buckets) - return dst - } - - if dst.B == 0 { - dst.buckets = newobject(t.Bucket) - } - dstArraySize := int(bucketShift(dst.B)) - srcArraySize := int(bucketShift(src.B)) - for i := 0; i < dstArraySize; i++ { - dstBmap := (*bmap)(add(dst.buckets, uintptr(i*int(t.BucketSize)))) - pos := 0 - for j := 0; j < srcArraySize; j += dstArraySize { - srcBmap := (*bmap)(add(src.buckets, uintptr((i+j)*int(t.BucketSize)))) - for srcBmap != nil { - dstBmap, pos = moveToBmap(t, dst, dstBmap, pos, srcBmap) - srcBmap = srcBmap.overflow(t) - } - } - } - - if src.oldbuckets == nil { - return dst - } - - oldB := src.B - srcOldbuckets := src.oldbuckets - if !src.sameSizeGrow() { - oldB-- - } - oldSrcArraySize := int(bucketShift(oldB)) - - for i := 0; i < oldSrcArraySize; i++ { - srcBmap := (*bmap)(add(srcOldbuckets, uintptr(i*int(t.BucketSize)))) - if evacuated(srcBmap) { - continue - } - - if oldB >= dst.B { // main bucket bits in dst is less than oldB bits in src - dstBmap := (*bmap)(add(dst.buckets, (uintptr(i)&bucketMask(dst.B))*uintptr(t.BucketSize))) - for dstBmap.overflow(t) != nil { - dstBmap = dstBmap.overflow(t) - } - pos := 0 - for srcBmap != nil { - dstBmap, pos = moveToBmap(t, dst, dstBmap, pos, srcBmap) - srcBmap = srcBmap.overflow(t) - } - continue - } - - // oldB < dst.B, so a single source bucket may go to multiple destination buckets. - // Process entries one at a time. 
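-		// Since oldB < dst.B, one source bucket fans out across several
-		// destination buckets, so each entry is re-inserted through
-		// mapassign to recompute its destination.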
- for srcBmap != nil { - // move from oldBlucket to new bucket - for i := uintptr(0); i < abi.OldMapBucketCount; i++ { - if isEmpty(srcBmap.tophash[i]) { - continue - } - - if src.flags&hashWriting != 0 { - fatal("concurrent map clone and map write") - } - - srcK := add(unsafe.Pointer(srcBmap), dataOffset+i*uintptr(t.KeySize)) - if t.IndirectKey() { - srcK = *((*unsafe.Pointer)(srcK)) - } - - srcEle := add(unsafe.Pointer(srcBmap), dataOffset+abi.OldMapBucketCount*uintptr(t.KeySize)+i*uintptr(t.ValueSize)) - if t.IndirectElem() { - srcEle = *((*unsafe.Pointer)(srcEle)) - } - dstEle := mapassign(t, dst, srcK) - typedmemmove(t.Elem, dstEle, srcEle) - } - srcBmap = srcBmap.overflow(t) - } - } - return dst -} - -// keys for implementing maps.keys -// -//go:linkname keys maps.keys -func keys(m any, p unsafe.Pointer) { - e := efaceOf(&m) - t := (*maptype)(unsafe.Pointer(e._type)) - h := (*hmap)(e.data) - - if h == nil || h.count == 0 { - return - } - s := (*slice)(p) - r := int(rand()) - offset := uint8(r >> h.B & (abi.OldMapBucketCount - 1)) - if h.B == 0 { - copyKeys(t, h, (*bmap)(h.buckets), s, offset) - return - } - arraySize := int(bucketShift(h.B)) - buckets := h.buckets - for i := 0; i < arraySize; i++ { - bucket := (i + r) & (arraySize - 1) - b := (*bmap)(add(buckets, uintptr(bucket)*uintptr(t.BucketSize))) - copyKeys(t, h, b, s, offset) - } - - if h.growing() { - oldArraySize := int(h.noldbuckets()) - for i := 0; i < oldArraySize; i++ { - bucket := (i + r) & (oldArraySize - 1) - b := (*bmap)(add(h.oldbuckets, uintptr(bucket)*uintptr(t.BucketSize))) - if evacuated(b) { - continue - } - copyKeys(t, h, b, s, offset) - } - } - return -} - -func copyKeys(t *maptype, h *hmap, b *bmap, s *slice, offset uint8) { - for b != nil { - for i := uintptr(0); i < abi.OldMapBucketCount; i++ { - offi := (i + uintptr(offset)) & (abi.OldMapBucketCount - 1) - if isEmpty(b.tophash[offi]) { - continue - } - if h.flags&hashWriting != 0 { - fatal("concurrent map read and map write") - } - k := add(unsafe.Pointer(b), dataOffset+offi*uintptr(t.KeySize)) - if t.IndirectKey() { - k = *((*unsafe.Pointer)(k)) - } - if s.len >= s.cap { - fatal("concurrent map read and map write") - } - typedmemmove(t.Key, add(s.array, uintptr(s.len)*uintptr(t.Key.Size())), k) - s.len++ - } - b = b.overflow(t) - } -} - -// values for implementing maps.values -// -//go:linkname values maps.values -func values(m any, p unsafe.Pointer) { - e := efaceOf(&m) - t := (*maptype)(unsafe.Pointer(e._type)) - h := (*hmap)(e.data) - if h == nil || h.count == 0 { - return - } - s := (*slice)(p) - r := int(rand()) - offset := uint8(r >> h.B & (abi.OldMapBucketCount - 1)) - if h.B == 0 { - copyValues(t, h, (*bmap)(h.buckets), s, offset) - return - } - arraySize := int(bucketShift(h.B)) - buckets := h.buckets - for i := 0; i < arraySize; i++ { - bucket := (i + r) & (arraySize - 1) - b := (*bmap)(add(buckets, uintptr(bucket)*uintptr(t.BucketSize))) - copyValues(t, h, b, s, offset) - } - - if h.growing() { - oldArraySize := int(h.noldbuckets()) - for i := 0; i < oldArraySize; i++ { - bucket := (i + r) & (oldArraySize - 1) - b := (*bmap)(add(h.oldbuckets, uintptr(bucket)*uintptr(t.BucketSize))) - if evacuated(b) { - continue - } - copyValues(t, h, b, s, offset) - } - } - return -} - -func copyValues(t *maptype, h *hmap, b *bmap, s *slice, offset uint8) { - for b != nil { - for i := uintptr(0); i < abi.OldMapBucketCount; i++ { - offi := (i + uintptr(offset)) & (abi.OldMapBucketCount - 1) - if isEmpty(b.tophash[offi]) { - continue - } - - if 
h.flags&hashWriting != 0 { - fatal("concurrent map read and map write") - } - - ele := add(unsafe.Pointer(b), dataOffset+abi.OldMapBucketCount*uintptr(t.KeySize)+offi*uintptr(t.ValueSize)) - if t.IndirectElem() { - ele = *((*unsafe.Pointer)(ele)) - } - if s.len >= s.cap { - fatal("concurrent map read and map write") - } - typedmemmove(t.Elem, add(s.array, uintptr(s.len)*uintptr(t.Elem.Size())), ele) - s.len++ - } - b = b.overflow(t) - } -} diff --git a/src/runtime/map_noswiss_test.go b/src/runtime/map_noswiss_test.go deleted file mode 100644 index 5af7b7b8c8..0000000000 --- a/src/runtime/map_noswiss_test.go +++ /dev/null @@ -1,214 +0,0 @@ -// Copyright 2024 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build !goexperiment.swissmap - -package runtime_test - -import ( - "internal/abi" - "internal/goarch" - "runtime" - "slices" - "testing" -) - -func TestHmapSize(t *testing.T) { - // The structure of hmap is defined in runtime/map.go - // and in cmd/compile/internal/reflectdata/map.go and must be in sync. - // The size of hmap should be 56 bytes on 64 bit and 36 bytes on 32 bit platforms. - var hmapSize = uintptr(2*8 + 5*goarch.PtrSize) - if runtime.RuntimeHmapSize != hmapSize { - t.Errorf("sizeof(runtime.hmap{})==%d, want %d", runtime.RuntimeHmapSize, hmapSize) - } -} - -func TestLoadFactor(t *testing.T) { - for b := uint8(0); b < 20; b++ { - count := 13 * (1 << b) / 2 // 6.5 - if b == 0 { - count = 8 - } - if runtime.OverLoadFactor(count, b) { - t.Errorf("OverLoadFactor(%d,%d)=true, want false", count, b) - } - if !runtime.OverLoadFactor(count+1, b) { - t.Errorf("OverLoadFactor(%d,%d)=false, want true", count+1, b) - } - } -} - -func TestMapIterOrder(t *testing.T) { - sizes := []int{3, 7, 9, 15} - if abi.OldMapBucketCountBits >= 5 { - // it gets flaky (often only one iteration order) at size 3 when abi.MapBucketCountBits >=5. - t.Fatalf("This test becomes flaky if abi.MapBucketCountBits(=%d) is 5 or larger", abi.OldMapBucketCountBits) - } - for _, n := range sizes { - for i := 0; i < 1000; i++ { - // Make m be {0: true, 1: true, ..., n-1: true}. - m := make(map[int]bool) - for i := 0; i < n; i++ { - m[i] = true - } - // Check that iterating over the map produces at least two different orderings. - ord := func() []int { - var s []int - for key := range m { - s = append(s, key) - } - return s - } - first := ord() - ok := false - for try := 0; try < 100; try++ { - if !slices.Equal(first, ord()) { - ok = true - break - } - } - if !ok { - t.Errorf("Map with n=%d elements had consistent iteration order: %v", n, first) - break - } - } - } -} - -const bs = abi.OldMapBucketCount - -// belowOverflow should be a pretty-full pair of buckets; -// atOverflow is 1/8 bs larger = 13/8 buckets or two buckets -// that are 13/16 full each, which is the overflow boundary. -// Adding one to that should ensure overflow to the next higher size. -const ( - belowOverflow = bs * 3 / 2 // 1.5 bs = 2 buckets @ 75% - atOverflow = belowOverflow + bs/8 // 2 buckets at 13/16 fill. 
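-	// With bs == 8: belowOverflow == 12, atOverflow == 13.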
-) - -var mapBucketTests = [...]struct { - n int // n is the number of map elements - noescape int // number of expected buckets for non-escaping map - escape int // number of expected buckets for escaping map -}{ - {-(1 << 30), 1, 1}, - {-1, 1, 1}, - {0, 1, 1}, - {1, 1, 1}, - {bs, 1, 1}, - {bs + 1, 2, 2}, - {belowOverflow, 2, 2}, // 1.5 bs = 2 buckets @ 75% - {atOverflow + 1, 4, 4}, // 13/8 bs + 1 == overflow to 4 - - {2 * belowOverflow, 4, 4}, // 3 bs = 4 buckets @75% - {2*atOverflow + 1, 8, 8}, // 13/4 bs + 1 = overflow to 8 - - {4 * belowOverflow, 8, 8}, // 6 bs = 8 buckets @ 75% - {4*atOverflow + 1, 16, 16}, // 13/2 bs + 1 = overflow to 16 -} - -func TestMapBuckets(t *testing.T) { - // Test that maps of different sizes have the right number of buckets. - // Non-escaping maps with small buckets (like map[int]int) never - // have a nil bucket pointer due to starting with preallocated buckets - // on the stack. Escaping maps start with a non-nil bucket pointer if - // hint size is above bucketCnt and thereby have more than one bucket. - // These tests depend on bucketCnt and loadFactor* in map.go. - t.Run("mapliteral", func(t *testing.T) { - for _, tt := range mapBucketTests { - localMap := map[int]int{} - if runtime.MapBucketsPointerIsNil(localMap) { - t.Errorf("no escape: buckets pointer is nil for non-escaping map") - } - for i := 0; i < tt.n; i++ { - localMap[i] = i - } - if got := runtime.MapBucketsCount(localMap); got != tt.noescape { - t.Errorf("no escape: n=%d want %d buckets, got %d", tt.n, tt.noescape, got) - } - escapingMap := runtime.Escape(map[int]int{}) - if count := runtime.MapBucketsCount(escapingMap); count > 1 && runtime.MapBucketsPointerIsNil(escapingMap) { - t.Errorf("escape: buckets pointer is nil for n=%d buckets", count) - } - for i := 0; i < tt.n; i++ { - escapingMap[i] = i - } - if got := runtime.MapBucketsCount(escapingMap); got != tt.escape { - t.Errorf("escape n=%d want %d buckets, got %d", tt.n, tt.escape, got) - } - } - }) - t.Run("nohint", func(t *testing.T) { - for _, tt := range mapBucketTests { - localMap := make(map[int]int) - if runtime.MapBucketsPointerIsNil(localMap) { - t.Errorf("no escape: buckets pointer is nil for non-escaping map") - } - for i := 0; i < tt.n; i++ { - localMap[i] = i - } - if got := runtime.MapBucketsCount(localMap); got != tt.noescape { - t.Errorf("no escape: n=%d want %d buckets, got %d", tt.n, tt.noescape, got) - } - escapingMap := runtime.Escape(make(map[int]int)) - if count := runtime.MapBucketsCount(escapingMap); count > 1 && runtime.MapBucketsPointerIsNil(escapingMap) { - t.Errorf("escape: buckets pointer is nil for n=%d buckets", count) - } - for i := 0; i < tt.n; i++ { - escapingMap[i] = i - } - if got := runtime.MapBucketsCount(escapingMap); got != tt.escape { - t.Errorf("escape: n=%d want %d buckets, got %d", tt.n, tt.escape, got) - } - } - }) - t.Run("makemap", func(t *testing.T) { - for _, tt := range mapBucketTests { - localMap := make(map[int]int, tt.n) - if runtime.MapBucketsPointerIsNil(localMap) { - t.Errorf("no escape: buckets pointer is nil for non-escaping map") - } - for i := 0; i < tt.n; i++ { - localMap[i] = i - } - if got := runtime.MapBucketsCount(localMap); got != tt.noescape { - t.Errorf("no escape: n=%d want %d buckets, got %d", tt.n, tt.noescape, got) - } - escapingMap := runtime.Escape(make(map[int]int, tt.n)) - if count := runtime.MapBucketsCount(escapingMap); count > 1 && runtime.MapBucketsPointerIsNil(escapingMap) { - t.Errorf("escape: buckets pointer is nil for n=%d buckets", count) - } - for i 
:= 0; i < tt.n; i++ { - escapingMap[i] = i - } - if got := runtime.MapBucketsCount(escapingMap); got != tt.escape { - t.Errorf("escape: n=%d want %d buckets, got %d", tt.n, tt.escape, got) - } - } - }) - t.Run("makemap64", func(t *testing.T) { - for _, tt := range mapBucketTests { - localMap := make(map[int]int, int64(tt.n)) - if runtime.MapBucketsPointerIsNil(localMap) { - t.Errorf("no escape: buckets pointer is nil for non-escaping map") - } - for i := 0; i < tt.n; i++ { - localMap[i] = i - } - if got := runtime.MapBucketsCount(localMap); got != tt.noescape { - t.Errorf("no escape: n=%d want %d buckets, got %d", tt.n, tt.noescape, got) - } - escapingMap := runtime.Escape(make(map[int]int, tt.n)) - if count := runtime.MapBucketsCount(escapingMap); count > 1 && runtime.MapBucketsPointerIsNil(escapingMap) { - t.Errorf("escape: buckets pointer is nil for n=%d buckets", count) - } - for i := 0; i < tt.n; i++ { - escapingMap[i] = i - } - if got := runtime.MapBucketsCount(escapingMap); got != tt.escape { - t.Errorf("escape: n=%d want %d buckets, got %d", tt.n, tt.escape, got) - } - } - }) -} diff --git a/src/runtime/map_swiss.go b/src/runtime/map_swiss.go deleted file mode 100644 index c2cf08fcaa..0000000000 --- a/src/runtime/map_swiss.go +++ /dev/null @@ -1,349 +0,0 @@ -// Copyright 2014 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build goexperiment.swissmap - -package runtime - -import ( - "internal/abi" - "internal/runtime/maps" - "internal/runtime/sys" - "unsafe" -) - -const ( - // TODO: remove? These are used by tests but not the actual map - loadFactorNum = 7 - loadFactorDen = 8 -) - -type maptype = abi.SwissMapType - -//go:linkname maps_errNilAssign internal/runtime/maps.errNilAssign -var maps_errNilAssign error = plainError("assignment to entry in nil map") - -func makemap64(t *abi.SwissMapType, hint int64, m *maps.Map) *maps.Map { - if int64(int(hint)) != hint { - hint = 0 - } - return makemap(t, int(hint), m) -} - -// makemap_small implements Go map creation for make(map[k]v) and -// make(map[k]v, hint) when hint is known to be at most abi.SwissMapGroupSlots -// at compile time and the map needs to be allocated on the heap. -// -// makemap_small should be an internal detail, -// but widely used packages access it using linkname. -// Notable members of the hall of shame include: -// - github.com/bytedance/sonic -// -// Do not remove or change the type signature. -// See go.dev/issue/67401. -// -//go:linkname makemap_small -func makemap_small() *maps.Map { - return maps.NewEmptyMap() -} - -// makemap implements Go map creation for make(map[k]v, hint). -// If the compiler has determined that the map or the first group -// can be created on the stack, m and optionally m.dirPtr may be non-nil. -// If m != nil, the map can be created directly in m. -// If m.dirPtr != nil, it points to a group usable for a small map. -// -// makemap should be an internal detail, -// but widely used packages access it using linkname. -// Notable members of the hall of shame include: -// - github.com/ugorji/go/codec -// -// Do not remove or change the type signature. -// See go.dev/issue/67401. -// -//go:linkname makemap -func makemap(t *abi.SwissMapType, hint int, m *maps.Map) *maps.Map { - if hint < 0 { - hint = 0 - } - - return maps.NewMap(t, uintptr(hint), m, maxAlloc) -} - -// mapaccess1 returns a pointer to h[key]. 
Never returns nil, instead -// it will return a reference to the zero object for the elem type if -// the key is not in the map. -// NOTE: The returned pointer may keep the whole map live, so don't -// hold onto it for very long. -// -// mapaccess1 is pushed from internal/runtime/maps. We could just call it, but -// we want to avoid one layer of call. -// -//go:linkname mapaccess1 -func mapaccess1(t *abi.SwissMapType, m *maps.Map, key unsafe.Pointer) unsafe.Pointer - -// mapaccess2 should be an internal detail, -// but widely used packages access it using linkname. -// Notable members of the hall of shame include: -// - github.com/ugorji/go/codec -// -// Do not remove or change the type signature. -// See go.dev/issue/67401. -// -//go:linkname mapaccess2 -func mapaccess2(t *abi.SwissMapType, m *maps.Map, key unsafe.Pointer) (unsafe.Pointer, bool) - -func mapaccess1_fat(t *abi.SwissMapType, m *maps.Map, key, zero unsafe.Pointer) unsafe.Pointer { - e := mapaccess1(t, m, key) - if e == unsafe.Pointer(&zeroVal[0]) { - return zero - } - return e -} - -func mapaccess2_fat(t *abi.SwissMapType, m *maps.Map, key, zero unsafe.Pointer) (unsafe.Pointer, bool) { - e := mapaccess1(t, m, key) - if e == unsafe.Pointer(&zeroVal[0]) { - return zero, false - } - return e, true -} - -// mapassign is pushed from internal/runtime/maps. We could just call it, but -// we want to avoid one layer of call. -// -// mapassign should be an internal detail, -// but widely used packages access it using linkname. -// Notable members of the hall of shame include: -// - github.com/bytedance/sonic -// - github.com/RomiChan/protobuf -// - github.com/segmentio/encoding -// - github.com/ugorji/go/codec -// -// Do not remove or change the type signature. -// See go.dev/issue/67401. -// -//go:linkname mapassign -func mapassign(t *abi.SwissMapType, m *maps.Map, key unsafe.Pointer) unsafe.Pointer - -// mapdelete should be an internal detail, -// but widely used packages access it using linkname. -// Notable members of the hall of shame include: -// - github.com/ugorji/go/codec -// -// Do not remove or change the type signature. -// See go.dev/issue/67401. -// -//go:linkname mapdelete -func mapdelete(t *abi.SwissMapType, m *maps.Map, key unsafe.Pointer) { - if raceenabled && m != nil { - callerpc := sys.GetCallerPC() - pc := abi.FuncPCABIInternal(mapdelete) - racewritepc(unsafe.Pointer(m), callerpc, pc) - raceReadObjectPC(t.Key, key, callerpc, pc) - } - if msanenabled && m != nil { - msanread(key, t.Key.Size_) - } - if asanenabled && m != nil { - asanread(key, t.Key.Size_) - } - - m.Delete(t, key) -} - -// mapIterStart initializes the Iter struct used for ranging over maps and -// performs the first step of iteration. The Iter struct pointed to by 'it' is -// allocated on the stack by the compilers order pass or on the heap by -// reflect. Both need to have zeroed it since the struct contains pointers. -func mapIterStart(t *abi.SwissMapType, m *maps.Map, it *maps.Iter) { - if raceenabled && m != nil { - callerpc := sys.GetCallerPC() - racereadpc(unsafe.Pointer(m), callerpc, abi.FuncPCABIInternal(mapIterStart)) - } - - it.Init(t, m) - it.Next() -} - -// mapIterNext performs the next step of iteration. Afterwards, the next -// key/elem are in it.Key()/it.Elem(). -func mapIterNext(it *maps.Iter) { - if raceenabled { - callerpc := sys.GetCallerPC() - racereadpc(unsafe.Pointer(it.Map()), callerpc, abi.FuncPCABIInternal(mapIterNext)) - } - - it.Next() -} - -// mapclear deletes all keys from a map. 
-func mapclear(t *abi.SwissMapType, m *maps.Map) { - if raceenabled && m != nil { - callerpc := sys.GetCallerPC() - pc := abi.FuncPCABIInternal(mapclear) - racewritepc(unsafe.Pointer(m), callerpc, pc) - } - - m.Clear(t) -} - -// Reflect stubs. Called from ../reflect/asm_*.s - -// reflect_makemap is for package reflect, -// but widely used packages access it using linkname. -// Notable members of the hall of shame include: -// - gitee.com/quant1x/gox -// - github.com/modern-go/reflect2 -// - github.com/goccy/go-json -// - github.com/RomiChan/protobuf -// - github.com/segmentio/encoding -// - github.com/v2pro/plz -// -// Do not remove or change the type signature. -// See go.dev/issue/67401. -// -//go:linkname reflect_makemap reflect.makemap -func reflect_makemap(t *abi.SwissMapType, cap int) *maps.Map { - // Check invariants and reflects math. - if t.Key.Equal == nil { - throw("runtime.reflect_makemap: unsupported map key type") - } - // TODO: other checks - - return makemap(t, cap, nil) -} - -// reflect_mapaccess is for package reflect, -// but widely used packages access it using linkname. -// Notable members of the hall of shame include: -// - gitee.com/quant1x/gox -// - github.com/modern-go/reflect2 -// - github.com/v2pro/plz -// -// Do not remove or change the type signature. -// See go.dev/issue/67401. -// -//go:linkname reflect_mapaccess reflect.mapaccess -func reflect_mapaccess(t *abi.SwissMapType, m *maps.Map, key unsafe.Pointer) unsafe.Pointer { - elem, ok := mapaccess2(t, m, key) - if !ok { - // reflect wants nil for a missing element - elem = nil - } - return elem -} - -//go:linkname reflect_mapaccess_faststr reflect.mapaccess_faststr -func reflect_mapaccess_faststr(t *abi.SwissMapType, m *maps.Map, key string) unsafe.Pointer { - elem, ok := mapaccess2_faststr(t, m, key) - if !ok { - // reflect wants nil for a missing element - elem = nil - } - return elem -} - -// reflect_mapassign is for package reflect, -// but widely used packages access it using linkname. -// Notable members of the hall of shame include: -// - gitee.com/quant1x/gox -// - github.com/v2pro/plz -// -// Do not remove or change the type signature. -// -//go:linkname reflect_mapassign reflect.mapassign0 -func reflect_mapassign(t *abi.SwissMapType, m *maps.Map, key unsafe.Pointer, elem unsafe.Pointer) { - p := mapassign(t, m, key) - typedmemmove(t.Elem, p, elem) -} - -//go:linkname reflect_mapassign_faststr reflect.mapassign_faststr0 -func reflect_mapassign_faststr(t *abi.SwissMapType, m *maps.Map, key string, elem unsafe.Pointer) { - p := mapassign_faststr(t, m, key) - typedmemmove(t.Elem, p, elem) -} - -//go:linkname reflect_mapdelete reflect.mapdelete -func reflect_mapdelete(t *abi.SwissMapType, m *maps.Map, key unsafe.Pointer) { - mapdelete(t, m, key) -} - -//go:linkname reflect_mapdelete_faststr reflect.mapdelete_faststr -func reflect_mapdelete_faststr(t *abi.SwissMapType, m *maps.Map, key string) { - mapdelete_faststr(t, m, key) -} - -// reflect_maplen is for package reflect, -// but widely used packages access it using linkname. -// Notable members of the hall of shame include: -// - github.com/goccy/go-json -// - github.com/wI2L/jettison -// -// Do not remove or change the type signature. -// See go.dev/issue/67401. 
-// -//go:linkname reflect_maplen reflect.maplen -func reflect_maplen(m *maps.Map) int { - if m == nil { - return 0 - } - if raceenabled { - callerpc := sys.GetCallerPC() - racereadpc(unsafe.Pointer(m), callerpc, abi.FuncPCABIInternal(reflect_maplen)) - } - return int(m.Used()) -} - -//go:linkname reflect_mapclear reflect.mapclear -func reflect_mapclear(t *abi.SwissMapType, m *maps.Map) { - mapclear(t, m) -} - -//go:linkname reflectlite_maplen internal/reflectlite.maplen -func reflectlite_maplen(m *maps.Map) int { - if m == nil { - return 0 - } - if raceenabled { - callerpc := sys.GetCallerPC() - racereadpc(unsafe.Pointer(m), callerpc, abi.FuncPCABIInternal(reflect_maplen)) - } - return int(m.Used()) -} - -// mapinitnoop is a no-op function known the Go linker; if a given global -// map (of the right size) is determined to be dead, the linker will -// rewrite the relocation (from the package init func) from the outlined -// map init function to this symbol. Defined in assembly so as to avoid -// complications with instrumentation (coverage, etc). -func mapinitnoop() - -// mapclone for implementing maps.Clone -// -//go:linkname mapclone maps.clone -func mapclone(m any) any { - e := efaceOf(&m) - typ := (*abi.SwissMapType)(unsafe.Pointer(e._type)) - map_ := (*maps.Map)(e.data) - map_ = map_.Clone(typ) - e.data = (unsafe.Pointer)(map_) - return m -} - -// keys for implementing maps.keys -// -//go:linkname keys maps.keys -func keys(m any, p unsafe.Pointer) { - // Currently unused in the maps package. - panic("unimplemented") -} - -// values for implementing maps.values -// -//go:linkname values maps.values -func values(m any, p unsafe.Pointer) { - // Currently unused in the maps package. - panic("unimplemented") -} diff --git a/src/runtime/map_swiss_test.go b/src/runtime/map_swiss_test.go deleted file mode 100644 index d5c9fdbe46..0000000000 --- a/src/runtime/map_swiss_test.go +++ /dev/null @@ -1,75 +0,0 @@ -// Copyright 2024 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build goexperiment.swissmap - -package runtime_test - -import ( - "internal/abi" - "internal/goarch" - "internal/runtime/maps" - "slices" - "testing" - "unsafe" -) - -func TestHmapSize(t *testing.T) { - // The structure of Map is defined in internal/runtime/maps/map.go - // and in cmd/compile/internal/reflectdata/map_swiss.go and must be in sync. - // The size of Map should be 48 bytes on 64 bit and 32 bytes on 32 bit platforms. - wantSize := uintptr(2*8 + 4*goarch.PtrSize) - gotSize := unsafe.Sizeof(maps.Map{}) - if gotSize != wantSize { - t.Errorf("sizeof(maps.Map{})==%d, want %d", gotSize, wantSize) - } -} - -// See also reflect_test.TestGroupSizeZero. -func TestGroupSizeZero(t *testing.T) { - var m map[struct{}]struct{} - mTyp := abi.TypeOf(m) - mt := (*abi.SwissMapType)(unsafe.Pointer(mTyp)) - - // internal/runtime/maps when create pointers to slots, even if slots - // are size 0. The compiler should have reserved an extra word to - // ensure that pointers to the zero-size type at the end of group are - // valid. - if mt.Group.Size() <= 8 { - t.Errorf("Group size got %d want >8", mt.Group.Size()) - } -} - -func TestMapIterOrder(t *testing.T) { - sizes := []int{3, 7, 9, 15} - for _, n := range sizes { - for i := 0; i < 1000; i++ { - // Make m be {0: true, 1: true, ..., n-1: true}. 
- m := make(map[int]bool) - for i := 0; i < n; i++ { - m[i] = true - } - // Check that iterating over the map produces at least two different orderings. - ord := func() []int { - var s []int - for key := range m { - s = append(s, key) - } - return s - } - first := ord() - ok := false - for try := 0; try < 100; try++ { - if !slices.Equal(first, ord()) { - ok = true - break - } - } - if !ok { - t.Errorf("Map with n=%d elements had consistent iteration order: %v", n, first) - break - } - } - } -} diff --git a/src/runtime/map_test.go b/src/runtime/map_test.go index b1ff02d851..ff7fbeb230 100644 --- a/src/runtime/map_test.go +++ b/src/runtime/map_test.go @@ -6,7 +6,9 @@ package runtime_test import ( "fmt" - "internal/goexperiment" + "internal/abi" + "internal/goarch" + "internal/runtime/maps" "internal/testenv" "math" "os" @@ -812,31 +814,6 @@ func TestIncrementAfterBulkClearKeyStringValueInt(t *testing.T) { } } -func TestMapTombstones(t *testing.T) { - m := map[int]int{} - const N = 10000 - // Fill a map. - for i := 0; i < N; i++ { - m[i] = i - } - runtime.MapTombstoneCheck(m) - // Delete half of the entries. - for i := 0; i < N; i += 2 { - delete(m, i) - } - runtime.MapTombstoneCheck(m) - // Add new entries to fill in holes. - for i := N; i < 3*N/2; i++ { - m[i] = i - } - runtime.MapTombstoneCheck(m) - // Delete everything. - for i := 0; i < 3*N/2; i++ { - delete(m, i) - } - runtime.MapTombstoneCheck(m) -} - type canString int func (c canString) String() string { @@ -1060,44 +1037,6 @@ func TestEmptyMapWithInterfaceKey(t *testing.T) { }) } -func TestMapKeys(t *testing.T) { - if goexperiment.SwissMap { - t.Skip("mapkeys not implemented for swissmaps") - } - - type key struct { - s string - pad [128]byte // sizeof(key) > abi.MapMaxKeyBytes - } - m := map[key]int{{s: "a"}: 1, {s: "b"}: 2} - keys := make([]key, 0, len(m)) - runtime.MapKeys(m, unsafe.Pointer(&keys)) - for _, k := range keys { - if len(k.s) != 1 { - t.Errorf("len(k.s) == %d, want 1", len(k.s)) - } - } -} - -func TestMapValues(t *testing.T) { - if goexperiment.SwissMap { - t.Skip("mapvalues not implemented for swissmaps") - } - - type val struct { - s string - pad [128]byte // sizeof(val) > abi.MapMaxElemBytes - } - m := map[int]val{1: {s: "a"}, 2: {s: "b"}} - vals := make([]val, 0, len(m)) - runtime.MapValues(m, unsafe.Pointer(&vals)) - for _, v := range vals { - if len(v.s) != 1 { - t.Errorf("len(v.s) == %d, want 1", len(v.s)) - } - } -} - func computeHash() uintptr { var v struct{} return runtime.MemHash(unsafe.Pointer(&v), 0, unsafe.Sizeof(v)) @@ -1202,3 +1141,62 @@ func TestMapIterDeleteReplace(t *testing.T) { }) } } + +func TestHmapSize(t *testing.T) { + // The structure of Map is defined in internal/runtime/maps/map.go + // and in cmd/compile/internal/reflectdata/map.go and must be in sync. + // The size of Map should be 48 bytes on 64 bit and 32 bytes on 32 bit platforms. + wantSize := uintptr(2*8 + 4*goarch.PtrSize) + gotSize := unsafe.Sizeof(maps.Map{}) + if gotSize != wantSize { + t.Errorf("sizeof(maps.Map{})==%d, want %d", gotSize, wantSize) + } +} + +// See also reflect_test.TestGroupSizeZero. +func TestGroupSizeZero(t *testing.T) { + var m map[struct{}]struct{} + mTyp := abi.TypeOf(m) + mt := (*abi.SwissMapType)(unsafe.Pointer(mTyp)) + + // internal/runtime/maps creates pointers to slots, even if slots + // are size 0. The compiler should have reserved an extra word to + // ensure that pointers to the zero-size type at the end of group are + // valid.
+ if mt.Group.Size() <= 8 { + t.Errorf("Group size got %d want >8", mt.Group.Size()) + } +} + +func TestMapIterOrder(t *testing.T) { + sizes := []int{3, 7, 9, 15} + for _, n := range sizes { + for i := 0; i < 1000; i++ { + // Make m be {0: true, 1: true, ..., n-1: true}. + m := make(map[int]bool) + for i := 0; i < n; i++ { + m[i] = true + } + // Check that iterating over the map produces at least two different orderings. + ord := func() []int { + var s []int + for key := range m { + s = append(s, key) + } + return s + } + first := ord() + ok := false + for try := 0; try < 100; try++ { + if !slices.Equal(first, ord()) { + ok = true + break + } + } + if !ok { + t.Errorf("Map with n=%d elements had consistent iteration order: %v", n, first) + break + } + } + } +} diff --git a/src/runtime/runtime-gdb.py b/src/runtime/runtime-gdb.py index 6d99515176..23b1f331c8 100644 --- a/src/runtime/runtime-gdb.py +++ b/src/runtime/runtime-gdb.py @@ -160,13 +160,6 @@ class MapTypePrinter: return str(self.val.type) def children(self): - fields = [f.name for f in self.val.type.strip_typedefs().target().fields()] - if 'buckets' in fields: - yield from self.old_map_children() - else: - yield from self.swiss_map_children() - - def swiss_map_children(self): SwissMapGroupSlots = 8 # see internal/abi:SwissMapGroupSlots cnt = 0 @@ -270,40 +263,6 @@ class MapTypePrinter: yield from group_slots(group) - def old_map_children(self): - MapBucketCount = 8 # see internal/abi:OldMapBucketCount - B = self.val['B'] - buckets = self.val['buckets'] - oldbuckets = self.val['oldbuckets'] - flags = self.val['flags'] - inttype = self.val['hash0'].type - cnt = 0 - for bucket in xrange(2 ** int(B)): - bp = buckets + bucket - if oldbuckets: - oldbucket = bucket & (2 ** (B - 1) - 1) - oldbp = oldbuckets + oldbucket - oldb = oldbp.dereference() - if (oldb['overflow'].cast(inttype) & 1) == 0: # old bucket not evacuated yet - if bucket >= 2 ** (B - 1): - continue # already did old bucket - bp = oldbp - while bp: - b = bp.dereference() - for i in xrange(MapBucketCount): - if b['tophash'][i] != 0: - k = b['keys'][i] - v = b['values'][i] - if flags & 1: - k = k.dereference() - if flags & 2: - v = v.dereference() - yield str(cnt), k - yield str(cnt + 1), v - cnt += 2 - bp = b['overflow'] - - class ChanTypePrinter: """Pretty print chan[T] types. diff --git a/src/runtime/runtime-gdb_test.go b/src/runtime/runtime-gdb_test.go index 47c1fe5851..e81efadeb3 100644 --- a/src/runtime/runtime-gdb_test.go +++ b/src/runtime/runtime-gdb_test.go @@ -8,8 +8,6 @@ import ( "bytes" "flag" "fmt" - "internal/abi" - "internal/goexperiment" "internal/testenv" "os" "os/exec" @@ -155,9 +153,6 @@ func checkPtraceScope(t *testing.T) { } } -// NOTE: the maps below are allocated larger than abi.MapBucketCount -// to ensure that they are not "optimized out". - var helloSource = ` import "fmt" import "runtime" @@ -166,19 +161,21 @@ var gslice []string var smallmapvar map[string]string func main() { smallmapvar = make(map[string]string) - mapvar := make(map[string]string, ` + strconv.FormatInt(abi.OldMapBucketCount+9, 10) + `) - slicemap := make(map[string][]string,` + strconv.FormatInt(abi.OldMapBucketCount+3, 10) + `) - chanint := make(chan int, 10) - chanstr := make(chan string, 10) - chanint <- 99 + // NOTE: the maps below are allocated large to ensure that they are not + // "optimized out". 
+ mapvar := make(map[string]string, 10) + slicemap := make(map[string][]string, 10) + chanint := make(chan int, 10) + chanstr := make(chan string, 10) + chanint <- 99 chanint <- 11 - chanstr <- "spongepants" - chanstr <- "squarebob" + chanstr <- "spongepants" + chanstr <- "squarebob" smallmapvar["abc"] = "def" mapvar["abc"] = "def" mapvar["ghi"] = "jkl" slicemap["a"] = []string{"b","c","d"} - slicemap["e"] = []string{"f","g","h"} + slicemap["e"] = []string{"f","g","h"} strvar := "abc" ptrvar := &strvar slicevar := make([]string, 0, 16) @@ -638,20 +635,10 @@ func TestGdbAutotmpTypes(t *testing.T) { types := []string{ "[]main.astruct", "main.astruct", - } - if goexperiment.SwissMap { - types = append(types, []string{ - "groupReference", - "table", - "map", - "map * map[string]main.astruct", - }...) - } else { - types = append(types, []string{ - "bucket", - "hash", - "hash * map[string]main.astruct", - }...) + "groupReference", + "table", + "map", + "map * map[string]main.astruct", } for _, name := range types { if !strings.Contains(sgot, name) { diff --git a/src/runtime/type.go b/src/runtime/type.go index baaaeafc4a..636ea3a485 100644 --- a/src/runtime/type.go +++ b/src/runtime/type.go @@ -9,7 +9,6 @@ package runtime import ( "internal/abi" "internal/goarch" - "internal/goexperiment" "internal/runtime/atomic" "unsafe" ) @@ -605,13 +604,8 @@ func typesEqual(t, v *_type, seen map[_typePair]struct{}) bool { } return true case abi.Map: - if goexperiment.SwissMap { - mt := (*abi.SwissMapType)(unsafe.Pointer(t)) - mv := (*abi.SwissMapType)(unsafe.Pointer(v)) - return typesEqual(mt.Key, mv.Key, seen) && typesEqual(mt.Elem, mv.Elem, seen) - } - mt := (*abi.OldMapType)(unsafe.Pointer(t)) - mv := (*abi.OldMapType)(unsafe.Pointer(v)) + mt := (*abi.SwissMapType)(unsafe.Pointer(t)) + mv := (*abi.SwissMapType)(unsafe.Pointer(v)) return typesEqual(mt.Key, mv.Key, seen) && typesEqual(mt.Elem, mv.Elem, seen) case abi.Pointer: pt := (*ptrtype)(unsafe.Pointer(t)) diff --git a/test/fixedbugs/issue69110.go b/test/fixedbugs/issue69110.go deleted file mode 100644 index ab51d0b5a6..0000000000 --- a/test/fixedbugs/issue69110.go +++ /dev/null @@ -1,59 +0,0 @@ -// run - -//go:build !goexperiment.swissmap - -// Copyright 2024 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package main - -import ( - "maps" - _ "unsafe" -) - -func main() { - for i := 0; i < 100; i++ { - f() - } -} - -const NB = 4 - -func f() { - // Make a map with NB buckets, at max capacity. - // 6.5 entries/bucket. - ne := NB * 13 / 2 - m := map[int]int{} - for i := 0; i < ne; i++ { - m[i] = i - } - - // delete/insert a lot, to hopefully get lots of overflow buckets - // and trigger a same-size grow. - ssg := false - for i := ne; i < ne+1000; i++ { - delete(m, i-ne) - m[i] = i - if sameSizeGrow(m) { - ssg = true - break - } - } - if !ssg { - return - } - - // Insert 1 more entry, which would ordinarily trigger a growth. - // We can't grow while growing, so we instead go over our - // target capacity. - m[-1] = -1 - - // Cloning in this state will make a map with a destination bucket - // array twice the size of the source. 
- _ = maps.Clone(m) -} - -//go:linkname sameSizeGrow runtime.sameSizeGrowForIssue69110Test -func sameSizeGrow(m map[int]int) bool diff --git a/test/fixedbugs/issue70189.go b/test/fixedbugs/issue70189.go index 357ac537ad..37ee5b9123 100644 --- a/test/fixedbugs/issue70189.go +++ b/test/fixedbugs/issue70189.go @@ -1,4 +1,4 @@ -// run -goexperiment noswissmap +// run // Copyright 2024 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style diff --git a/test/live.go b/test/live.go index 46e5e3e757..6e1a60557c 100644 --- a/test/live.go +++ b/test/live.go @@ -492,7 +492,7 @@ func f28(b bool) { func f29(b bool) { if b { - for k := range m { // ERROR "live at call to (mapiterinit|mapIterStart): .autotmp_[0-9]+$" "live at call to (mapiternext|mapIterNext): .autotmp_[0-9]+$" "stack object .autotmp_[0-9]+ (runtime.hiter|internal/runtime/maps.Iter)$" + for k := range m { // ERROR "live at call to (mapiterinit|mapIterStart): .autotmp_[0-9]+$" "live at call to (mapiternext|mapIterNext): .autotmp_[0-9]+$" "stack object .autotmp_[0-9]+ internal/runtime/maps.Iter$" printstring(k) // ERROR "live at call to printstring: .autotmp_[0-9]+$" } } @@ -695,7 +695,7 @@ func newT40() *T40 { func good40() { ret := T40{} // ERROR "stack object ret T40$" - ret.m = make(map[int]int) // ERROR "live at call to rand(32)?: .autotmp_[0-9]+$" "stack object .autotmp_[0-9]+ (runtime.hmap|internal/runtime/maps.Map)$" + ret.m = make(map[int]int) // ERROR "live at call to rand(32)?: .autotmp_[0-9]+$" "stack object .autotmp_[0-9]+ internal/runtime/maps.Map$" t := &ret printnl() // ERROR "live at call to printnl: ret$" // Note: ret is live at the printnl because the compiler moves &ret diff --git a/test/live2.go b/test/live2.go index 313caa6fb8..d88ee6df2c 100644 --- a/test/live2.go +++ b/test/live2.go @@ -27,14 +27,14 @@ func newT40() *T40 { } func bad40() { - t := newT40() // ERROR "stack object ret T40$" "stack object .autotmp_[0-9]+ (runtime.hmap|internal/runtime/maps.Map)$" + t := newT40() // ERROR "stack object ret T40$" "stack object .autotmp_[0-9]+ internal/runtime/maps.Map$" printnl() // ERROR "live at call to printnl: ret$" useT40(t) } func good40() { ret := T40{} // ERROR "stack object ret T40$" - ret.m = make(map[int]int, 42) // ERROR "stack object .autotmp_[0-9]+ (runtime.hmap|internal/runtime/maps.Map)$" + ret.m = make(map[int]int, 42) // ERROR "stack object .autotmp_[0-9]+ internal/runtime/maps.Map$" t := &ret printnl() // ERROR "live at call to printnl: ret$" useT40(t) diff --git a/test/live_regabi.go b/test/live_regabi.go index ddb4caed1a..d2565ba913 100644 --- a/test/live_regabi.go +++ b/test/live_regabi.go @@ -490,7 +490,7 @@ func f28(b bool) { func f29(b bool) { if b { - for k := range m { // ERROR "live at call to (mapiterinit|mapIterStart): .autotmp_[0-9]+$" "live at call to (mapiternext|mapIterNext): .autotmp_[0-9]+$" "stack object .autotmp_[0-9]+ (runtime.hiter|internal/runtime/maps.Iter)$" + for k := range m { // ERROR "live at call to (mapiterinit|mapIterStart): .autotmp_[0-9]+$" "live at call to (mapiternext|mapIterNext): .autotmp_[0-9]+$" "stack object .autotmp_[0-9]+ internal/runtime/maps.Iter$" printstring(k) // ERROR "live at call to printstring: .autotmp_[0-9]+$" } } @@ -693,7 +693,7 @@ func newT40() *T40 { func good40() { ret := T40{} // ERROR "stack object ret T40$" - ret.m = make(map[int]int) // ERROR "live at call to rand(32)?: .autotmp_[0-9]+$" "stack object .autotmp_[0-9]+ (runtime.hmap|internal/runtime/maps.Map)$" + ret.m = make(map[int]int) // ERROR "live at 
call to rand(32)?: .autotmp_[0-9]+$" "stack object .autotmp_[0-9]+ internal/runtime/maps.Map$" t := &ret printnl() // ERROR "live at call to printnl: ret$" // Note: ret is live at the printnl because the compiler moves &ret -- cgit v1.3-6-g1900
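
The TestMapIterOrder logic consolidated into map_test.go above asserts that ranging over the same Swiss map repeatedly yields at least two distinct key orders. The standalone program below is a sketch of that same property observed from ordinary user code, outside the runtime test harness; the file name, element count, retry bound, and output strings are illustrative choices, not part of this patch.

// iterorder.go: a standalone sketch of the property TestMapIterOrder
// verifies: iterating the same map twice should eventually produce
// different key orders, because the runtime randomizes where each
// range loop starts.
package main

import (
	"fmt"
	"slices"
)

func main() {
	// Build m = {0: true, 1: true, ..., n-1: true}, mirroring the test.
	const n = 9 // one of the sizes the test exercises
	m := make(map[int]bool)
	for i := 0; i < n; i++ {
		m[i] = true
	}

	// ord captures one iteration order over m.
	ord := func() []int {
		var s []int
		for key := range m {
			s = append(s, key)
		}
		return s
	}

	first := ord()
	for try := 0; try < 100; try++ {
		if !slices.Equal(first, ord()) {
			fmt.Println("iteration orders differ: randomization observed")
			return
		}
	}
	fmt.Printf("100 identical orders for n=%d: randomization looks broken\n", n)
}

As in the moved test, a single repeated comparison can collide by chance, so the loop tolerates up to 100 identical orderings before declaring the randomization suspect; for n >= 3 that many agreements is vanishingly unlikely when per-iteration randomization is working.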