Diffstat (limited to 'src/runtime')
29 files changed, 329 insertions, 362 deletions
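
For orientation while reading the hunks below: the whole commit is a mechanical rename of runtime._type fields to the exported spellings shared with internal/abi: size becomes Size_, ptrdata becomes PtrBytes, kind becomes Kind_, align becomes Align_, hash becomes Hash, equal becomes Equal, gcdata becomes GCData, and the tflag constants move behind internal/abi (tflagRegularMemory -> abi.TFlagRegularMemory, tflagUncommon -> abi.TFlagUncommon). A minimal sketch of the mapping (illustrative only; these are not the real runtime definitions):

package main

import "fmt"

// renames records the old -> new field spellings applied throughout this diff.
var renames = map[string]string{
	"size":    "Size_",
	"ptrdata": "PtrBytes",
	"hash":    "Hash",
	"tflag":   "TFlag",
	"align":   "Align_",
	"kind":    "Kind_",
	"equal":   "Equal",
	"gcdata":  "GCData",
}

func main() {
	for old, now := range renames {
		fmt.Printf("t.%s -> t.%s\n", old, now)
	}
}
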
diff --git a/src/runtime/alg.go b/src/runtime/alg.go
index 2a413eeef3..3e30e7ca77 100644
--- a/src/runtime/alg.go
+++ b/src/runtime/alg.go
@@ -5,6 +5,7 @@ package runtime
 
 import (
+	"internal/abi"
 	"internal/cpu"
 	"internal/goarch"
 	"unsafe"
@@ -100,7 +101,7 @@ func interhash(p unsafe.Pointer, h uintptr) uintptr {
 		return h
 	}
 	t := tab._type
-	if t.equal == nil {
+	if t.Equal == nil {
 		// Check hashability here. We could do this check inside
 		// typehash, but we want to report the topmost type in
 		// the error text (e.g. in a struct with a field of slice type
@@ -120,7 +121,7 @@ func nilinterhash(p unsafe.Pointer, h uintptr) uintptr {
 	if t == nil {
 		return h
 	}
-	if t.equal == nil {
+	if t.Equal == nil {
 		// See comment in interhash above.
 		panic(errorString("hash of unhashable type " + t.string()))
 	}
@@ -142,18 +143,18 @@ func nilinterhash(p unsafe.Pointer, h uintptr) uintptr {
 // Note: this function must match the compiler generated
 // functions exactly. See issue 37716.
 func typehash(t *_type, p unsafe.Pointer, h uintptr) uintptr {
-	if t.tflag&tflagRegularMemory != 0 {
+	if t.TFlag&abi.TFlagRegularMemory != 0 {
 		// Handle ptr sizes specially, see issue 37086.
-		switch t.size {
+		switch t.Size_ {
 		case 4:
 			return memhash32(p, h)
 		case 8:
 			return memhash64(p, h)
 		default:
-			return memhash(p, h, t.size)
+			return memhash(p, h, t.Size_)
 		}
 	}
-	switch t.kind & kindMask {
+	switch t.Kind_ & kindMask {
 	case kindFloat32:
 		return f32hash(p, h)
 	case kindFloat64:
@@ -173,7 +174,7 @@ func typehash(t *_type, p unsafe.Pointer, h uintptr) uintptr {
 	case kindArray:
 		a := (*arraytype)(unsafe.Pointer(t))
 		for i := uintptr(0); i < a.len; i++ {
-			h = typehash(a.elem, add(p, i*a.elem.size), h)
+			h = typehash(a.elem, add(p, i*a.elem.Size_), h)
 		}
 		return h
 	case kindStruct:
@@ -244,7 +245,7 @@ func efaceeq(t *_type, x, y unsafe.Pointer) bool {
 	if t == nil {
 		return true
 	}
-	eq := t.equal
+	eq := t.Equal
 	if eq == nil {
 		panic(errorString("comparing uncomparable type " + t.string()))
 	}
@@ -261,7 +262,7 @@ func ifaceeq(tab *itab, x, y unsafe.Pointer) bool {
 		return true
 	}
 	t := tab._type
-	eq := t.equal
+	eq := t.Equal
 	if eq == nil {
 		panic(errorString("comparing uncomparable type " + t.string()))
 	}
diff --git a/src/runtime/arena.go b/src/runtime/arena.go
index 7ff612e902..235efa1533 100644
--- a/src/runtime/arena.go
+++ b/src/runtime/arena.go
@@ -109,7 +109,7 @@ func arena_newArena() unsafe.Pointer {
 //go:linkname arena_arena_New arena.runtime_arena_arena_New
 func arena_arena_New(arena unsafe.Pointer, typ any) any {
 	t := (*_type)(efaceOf(&typ).data)
-	if t.kind&kindMask != kindPtr {
+	if t.Kind_&kindMask != kindPtr {
 		throw("arena_New: non-pointer type")
 	}
 	te := (*ptrtype)(unsafe.Pointer(t)).elem
@@ -143,7 +143,7 @@ func arena_heapify(s any) any {
 	var v unsafe.Pointer
 	e := efaceOf(&s)
 	t := e._type
-	switch t.kind & kindMask {
+	switch t.Kind_ & kindMask {
 	case kindString:
 		v = stringStructOf((*string)(e.data)).str
 	case kindSlice:
@@ -160,7 +160,7 @@ func arena_heapify(s any) any {
 	}
 	// Heap-allocate storage for a copy.
 	var x any
-	switch t.kind & kindMask {
+	switch t.Kind_ & kindMask {
 	case kindString:
 		s1 := s.(string)
 		s2, b := rawstring(len(s1))
@@ -281,11 +281,11 @@ func (a *userArena) slice(sl any, cap int) {
 	}
 	i := efaceOf(&sl)
 	typ := i._type
-	if typ.kind&kindMask != kindPtr {
+	if typ.Kind_&kindMask != kindPtr {
 		panic("slice result of non-ptr type")
 	}
 	typ = (*ptrtype)(unsafe.Pointer(typ)).elem
-	if typ.kind&kindMask != kindSlice {
+	if typ.Kind_&kindMask != kindSlice {
 		panic("slice of non-ptr-to-slice type")
 	}
 	typ = (*slicetype)(unsafe.Pointer(typ)).elem
@@ -435,7 +435,7 @@ var userArenaState struct {
 // userArenaNextFree reserves space in the user arena for an item of the specified
 // type. If cap is not -1, this is for an array of cap elements of type t.
 func (s *mspan) userArenaNextFree(typ *_type, cap int) unsafe.Pointer {
-	size := typ.size
+	size := typ.Size_
 	if cap > 0 {
 		if size > ^uintptr(0)/uintptr(cap) {
 			// Overflow.
@@ -468,14 +468,14 @@ func (s *mspan) userArenaNextFree(typ *_type, cap int) unsafe.Pointer {
 	mp.mallocing = 1
 
 	var ptr unsafe.Pointer
-	if typ.ptrdata == 0 {
+	if typ.PtrBytes == 0 {
 		// Allocate pointer-less objects from the tail end of the chunk.
-		v, ok := s.userArenaChunkFree.takeFromBack(size, typ.align)
+		v, ok := s.userArenaChunkFree.takeFromBack(size, typ.Align_)
 		if ok {
 			ptr = unsafe.Pointer(v)
 		}
 	} else {
-		v, ok := s.userArenaChunkFree.takeFromFront(size, typ.align)
+		v, ok := s.userArenaChunkFree.takeFromFront(size, typ.Align_)
 		if ok {
 			ptr = unsafe.Pointer(v)
 		}
@@ -490,7 +490,7 @@ func (s *mspan) userArenaNextFree(typ *_type, cap int) unsafe.Pointer {
 		throw("arena chunk needs zeroing, but should already be zeroed")
 	}
 	// Set up heap bitmap and do extra accounting.
-	if typ.ptrdata != 0 {
+	if typ.PtrBytes != 0 {
 		if cap >= 0 {
 			userArenaHeapBitsSetSliceType(typ, cap, ptr, s.base())
 		} else {
@@ -501,9 +501,9 @@ func (s *mspan) userArenaNextFree(typ *_type, cap int) unsafe.Pointer {
 			throw("mallocgc called without a P or outside bootstrapping")
 		}
 		if cap > 0 {
-			c.scanAlloc += size - (typ.size - typ.ptrdata)
+			c.scanAlloc += size - (typ.Size_ - typ.PtrBytes)
 		} else {
-			c.scanAlloc += typ.ptrdata
+			c.scanAlloc += typ.PtrBytes
 		}
 	}
 
@@ -556,14 +556,14 @@ func userArenaHeapBitsSetType(typ *_type, ptr unsafe.Pointer, base uintptr) {
 		h = h.write(b, 1)
 	}
 
-	p := typ.gcdata // start of 1-bit pointer mask (or GC program)
+	p := typ.GCData // start of 1-bit pointer mask (or GC program)
 	var gcProgBits uintptr
-	if typ.kind&kindGCProg != 0 {
+	if typ.Kind_&kindGCProg != 0 {
 		// Expand gc program, using the object itself for storage.
 		gcProgBits = runGCProg(addb(p, 4), (*byte)(ptr))
 		p = (*byte)(ptr)
 	}
-	nb := typ.ptrdata / goarch.PtrSize
+	nb := typ.PtrBytes / goarch.PtrSize
 
 	for i := uintptr(0); i < nb; i += ptrBits {
 		k := nb - i
@@ -578,10 +578,10 @@ func userArenaHeapBitsSetType(typ *_type, ptr unsafe.Pointer, base uintptr) {
 	// to clear. We don't need to do this to clear stale noMorePtrs
 	// markers from previous uses because arena chunk pointer bitmaps
 	// are always fully cleared when reused.
-	h = h.pad(typ.size - typ.ptrdata)
-	h.flush(uintptr(ptr), typ.size)
+	h = h.pad(typ.Size_ - typ.PtrBytes)
+	h.flush(uintptr(ptr), typ.Size_)
 
-	if typ.kind&kindGCProg != 0 {
+	if typ.Kind_&kindGCProg != 0 {
 		// Zero out temporary ptrmask buffer inside object.
 		memclrNoHeapPointers(ptr, (gcProgBits+7)/8)
 	}
@@ -591,16 +591,16 @@ func userArenaHeapBitsSetType(typ *_type, ptr unsafe.Pointer, base uintptr) {
 	// Derived from heapBitsSetType.
 	const doubleCheck = false
 	if doubleCheck {
-		size := typ.size
+		size := typ.Size_
 		x := uintptr(ptr)
 		h := heapBitsForAddr(x, size)
 		for i := uintptr(0); i < size; i += goarch.PtrSize {
 			// Compute the pointer bit we want at offset i.
 			want := false
-			off := i % typ.size
-			if off < typ.ptrdata {
+			off := i % typ.Size_
+			if off < typ.PtrBytes {
 				j := off / goarch.PtrSize
-				want = *addb(typ.gcdata, j/8)>>(j%8)&1 != 0
+				want = *addb(typ.GCData, j/8)>>(j%8)&1 != 0
 			}
 			if want {
 				var addr uintptr
@@ -620,12 +620,12 @@ func userArenaHeapBitsSetType(typ *_type, ptr unsafe.Pointer, base uintptr) {
 // Go slice backing store values allocated in a user arena chunk. It sets up the
 // heap bitmap for n consecutive values with type typ allocated at address ptr.
 func userArenaHeapBitsSetSliceType(typ *_type, n int, ptr unsafe.Pointer, base uintptr) {
-	mem, overflow := math.MulUintptr(typ.size, uintptr(n))
+	mem, overflow := math.MulUintptr(typ.Size_, uintptr(n))
 	if overflow || n < 0 || mem > maxAlloc {
 		panic(plainError("runtime: allocation size out of range"))
 	}
 	for i := 0; i < n; i++ {
-		userArenaHeapBitsSetType(typ, add(ptr, uintptr(i)*typ.size), base)
+		userArenaHeapBitsSetType(typ, add(ptr, uintptr(i)*typ.Size_), base)
 	}
 }
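
Note on the arena hunks: userArenaNextFree guards the size*cap multiplication with size > ^uintptr(0)/uintptr(cap), and userArenaHeapBitsSetSliceType relies on math.MulUintptr for the same purpose. A small standalone sketch of that overflow test (the helper name is made up here):

package main

import "fmt"

// mulWouldOverflow reports whether size*n overflows uintptr, using the
// same division trick as userArenaNextFree: for n > 0, the product
// overflows exactly when size exceeds floor(MaxUintptr / n).
func mulWouldOverflow(size, n uintptr) bool {
	return n > 0 && size > ^uintptr(0)/n
}

func main() {
	fmt.Println(mulWouldOverflow(1<<40, 1<<30)) // true on 64-bit
	fmt.Println(mulWouldOverflow(8, 1024))      // false
}
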
diff --git a/src/runtime/cgocall.go b/src/runtime/cgocall.go
index b29523cab1..c0b0f4fe85 100644
--- a/src/runtime/cgocall.go
+++ b/src/runtime/cgocall.go
@@ -407,18 +407,18 @@ func cgoCheckPointer(ptr any, arg any) {
 	t := ep._type
 
 	top := true
-	if arg != nil && (t.kind&kindMask == kindPtr || t.kind&kindMask == kindUnsafePointer) {
+	if arg != nil && (t.Kind_&kindMask == kindPtr || t.Kind_&kindMask == kindUnsafePointer) {
 		p := ep.data
-		if t.kind&kindDirectIface == 0 {
+		if t.Kind_&kindDirectIface == 0 {
 			p = *(*unsafe.Pointer)(p)
 		}
 		if p == nil || !cgoIsGoPointer(p) {
 			return
 		}
 		aep := efaceOf(&arg)
-		switch aep._type.kind & kindMask {
+		switch aep._type.Kind_ & kindMask {
 		case kindBool:
-			if t.kind&kindMask == kindUnsafePointer {
+			if t.Kind_&kindMask == kindUnsafePointer {
 				// We don't know the type of the element.
 				break
 			}
@@ -441,7 +441,7 @@ func cgoCheckPointer(ptr any, arg any) {
 		}
 	}
 
-	cgoCheckArg(t, ep.data, t.kind&kindDirectIface == 0, top, cgoCheckPointerFail)
+	cgoCheckArg(t, ep.data, t.Kind_&kindDirectIface == 0, top, cgoCheckPointerFail)
 }
 
 const cgoCheckPointerFail = "cgo argument has Go pointer to Go pointer"
@@ -452,12 +452,12 @@ const cgoResultFail = "cgo result has Go pointer"
 // depending on indir. The top parameter is whether we are at the top
 // level, where Go pointers are allowed.
 func cgoCheckArg(t *_type, p unsafe.Pointer, indir, top bool, msg string) {
-	if t.ptrdata == 0 || p == nil {
+	if t.PtrBytes == 0 || p == nil {
 		// If the type has no pointers there is nothing to do.
 		return
 	}
 
-	switch t.kind & kindMask {
+	switch t.Kind_ & kindMask {
 	default:
 		throw("can't happen")
 	case kindArray:
@@ -466,12 +466,12 @@ func cgoCheckArg(t *_type, p unsafe.Pointer, indir, top bool, msg string) {
 			if at.len != 1 {
 				throw("can't happen")
 			}
-			cgoCheckArg(at.elem, p, at.elem.kind&kindDirectIface == 0, top, msg)
+			cgoCheckArg(at.elem, p, at.elem.Kind_&kindDirectIface == 0, top, msg)
 			return
 		}
 		for i := uintptr(0); i < at.len; i++ {
 			cgoCheckArg(at.elem, p, true, top, msg)
-			p = add(p, at.elem.size)
+			p = add(p, at.elem.Size_)
 		}
 	case kindChan, kindMap:
 		// These types contain internal pointers that will
@@ -504,7 +504,7 @@ func cgoCheckArg(t *_type, p unsafe.Pointer, indir, top bool, msg string) {
 		if !top {
 			panic(errorString(msg))
 		}
-		cgoCheckArg(it, p, it.kind&kindDirectIface == 0, false, msg)
+		cgoCheckArg(it, p, it.Kind_&kindDirectIface == 0, false, msg)
 	case kindSlice:
 		st := (*slicetype)(unsafe.Pointer(t))
 		s := (*slice)(p)
@@ -515,12 +515,12 @@ func cgoCheckArg(t *_type, p unsafe.Pointer, indir, top bool, msg string) {
 		if !top {
 			panic(errorString(msg))
 		}
-		if st.elem.ptrdata == 0 {
+		if st.elem.PtrBytes == 0 {
 			return
 		}
 		for i := 0; i < s.cap; i++ {
 			cgoCheckArg(st.elem, p, true, false, msg)
-			p = add(p, st.elem.size)
+			p = add(p, st.elem.Size_)
 		}
 	case kindString:
 		ss := (*stringStruct)(p)
@@ -536,11 +536,11 @@ func cgoCheckArg(t *_type, p unsafe.Pointer, indir, top bool, msg string) {
 			if len(st.fields) != 1 {
 				throw("can't happen")
 			}
-			cgoCheckArg(st.fields[0].typ, p, st.fields[0].typ.kind&kindDirectIface == 0, top, msg)
+			cgoCheckArg(st.fields[0].typ, p, st.fields[0].typ.Kind_&kindDirectIface == 0, top, msg)
 			return
 		}
 		for _, f := range st.fields {
-			if f.typ.ptrdata == 0 {
+			if f.typ.PtrBytes == 0 {
 				continue
 			}
 			cgoCheckArg(f.typ, add(p, f.offset), true, top, msg)
@@ -645,5 +645,5 @@ func cgoCheckResult(val any) {
 	ep := efaceOf(&val)
 	t := ep._type
 
-	cgoCheckArg(t, ep.data, t.kind&kindDirectIface == 0, false, cgoResultFail)
+	cgoCheckArg(t, ep.data, t.Kind_&kindDirectIface == 0, false, cgoResultFail)
 }
diff --git a/src/runtime/cgocheck.go b/src/runtime/cgocheck.go
index ee8537e2a6..4d5683b54f 100644
--- a/src/runtime/cgocheck.go
+++ b/src/runtime/cgocheck.go
@@ -70,7 +70,7 @@ func cgoCheckPtrWrite(dst *unsafe.Pointer, src unsafe.Pointer) {
 //go:nosplit
 //go:nowritebarrier
 func cgoCheckMemmove(typ *_type, dst, src unsafe.Pointer) {
-	cgoCheckMemmove2(typ, dst, src, 0, typ.size)
+	cgoCheckMemmove2(typ, dst, src, 0, typ.Size_)
 }
 
 // cgoCheckMemmove2 is called when moving a block of memory.
@@ -82,7 +82,7 @@ func cgoCheckMemmove(typ *_type, dst, src unsafe.Pointer) {
 //go:nosplit
 //go:nowritebarrier
 func cgoCheckMemmove2(typ *_type, dst, src unsafe.Pointer, off, size uintptr) {
-	if typ.ptrdata == 0 {
+	if typ.PtrBytes == 0 {
 		return
 	}
 	if !cgoIsGoPointer(src) {
@@ -103,7 +103,7 @@ func cgoCheckMemmove2(typ *_type, dst, src unsafe.Pointer, off, size uintptr) {
 //go:nosplit
 //go:nowritebarrier
 func cgoCheckSliceCopy(typ *_type, dst, src unsafe.Pointer, n int) {
-	if typ.ptrdata == 0 {
+	if typ.PtrBytes == 0 {
 		return
 	}
 	if !cgoIsGoPointer(src) {
@@ -114,8 +114,8 @@ func cgoCheckSliceCopy(typ *_type, dst, src unsafe.Pointer, n int) {
 	}
 	p := src
 	for i := 0; i < n; i++ {
-		cgoCheckTypedBlock(typ, p, 0, typ.size)
-		p = add(p, typ.size)
+		cgoCheckTypedBlock(typ, p, 0, typ.Size_)
+		p = add(p, typ.Size_)
 	}
 }
 
@@ -126,16 +126,16 @@ func cgoCheckSliceCopy(typ *_type, dst, src unsafe.Pointer, n int) {
 //go:nosplit
 //go:nowritebarrier
 func cgoCheckTypedBlock(typ *_type, src unsafe.Pointer, off, size uintptr) {
-	// Anything past typ.ptrdata is not a pointer.
-	if typ.ptrdata <= off {
+	// Anything past typ.PtrBytes is not a pointer.
+	if typ.PtrBytes <= off {
 		return
 	}
-	if ptrdataSize := typ.ptrdata - off; size > ptrdataSize {
+	if ptrdataSize := typ.PtrBytes - off; size > ptrdataSize {
 		size = ptrdataSize
 	}
 
-	if typ.kind&kindGCProg == 0 {
-		cgoCheckBits(src, typ.gcdata, off, size)
+	if typ.Kind_&kindGCProg == 0 {
+		cgoCheckBits(src, typ.GCData, off, size)
 		return
 	}
 
@@ -226,37 +226,37 @@ func cgoCheckBits(src unsafe.Pointer, gcbits *byte, off, size uintptr) {
 //go:nowritebarrier
 //go:systemstack
 func cgoCheckUsingType(typ *_type, src unsafe.Pointer, off, size uintptr) {
-	if typ.ptrdata == 0 {
+	if typ.PtrBytes == 0 {
 		return
 	}
 
-	// Anything past typ.ptrdata is not a pointer.
-	if typ.ptrdata <= off {
+	// Anything past typ.PtrBytes is not a pointer.
+	if typ.PtrBytes <= off {
 		return
 	}
-	if ptrdataSize := typ.ptrdata - off; size > ptrdataSize {
+	if ptrdataSize := typ.PtrBytes - off; size > ptrdataSize {
 		size = ptrdataSize
 	}
 
-	if typ.kind&kindGCProg == 0 {
-		cgoCheckBits(src, typ.gcdata, off, size)
+	if typ.Kind_&kindGCProg == 0 {
+		cgoCheckBits(src, typ.GCData, off, size)
 		return
 	}
 
-	switch typ.kind & kindMask {
+	switch typ.Kind_ & kindMask {
 	default:
 		throw("can't happen")
 	case kindArray:
 		at := (*arraytype)(unsafe.Pointer(typ))
 		for i := uintptr(0); i < at.len; i++ {
-			if off < at.elem.size {
+			if off < at.elem.Size_ {
 				cgoCheckUsingType(at.elem, src, off, size)
 			}
-			src = add(src, at.elem.size)
+			src = add(src, at.elem.Size_)
 			skipped := off
-			if skipped > at.elem.size {
-				skipped = at.elem.size
+			if skipped > at.elem.Size_ {
+				skipped = at.elem.Size_
 			}
-			checked := at.elem.size - skipped
+			checked := at.elem.Size_ - skipped
 			off -= skipped
 			if size <= checked {
 				return
@@ -266,15 +266,15 @@ func cgoCheckUsingType(typ *_type, src unsafe.Pointer, off, size uintptr) {
 	case kindStruct:
 		st := (*structtype)(unsafe.Pointer(typ))
 		for _, f := range st.fields {
-			if off < f.typ.size {
+			if off < f.typ.Size_ {
 				cgoCheckUsingType(f.typ, src, off, size)
 			}
-			src = add(src, f.typ.size)
+			src = add(src, f.typ.Size_)
 			skipped := off
-			if skipped > f.typ.size {
-				skipped = f.typ.size
+			if skipped > f.typ.Size_ {
+				skipped = f.typ.Size_
 			}
-			checked := f.typ.size - skipped
+			checked := f.typ.Size_ - skipped
 			off -= skipped
 			if size <= checked {
 				return
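
The gcdata -> GCData hunks above all read the same structure: a 1-bit pointer mask with one bit per word of the type's pointer prefix (cgoCheckBits consumes it, and arena.go's doubleCheck code spells the bit test out as *addb(typ.GCData, j/8)>>(j%8)&1 != 0). A safe-Go sketch of that test:

package main

import "fmt"

// wordIsPointer reports whether word j is a pointer according to a
// 1-bit pointer mask like the one typ.GCData points at (sketch; the
// runtime does this with unsafe pointer arithmetic, not a slice).
func wordIsPointer(mask []byte, j uint) bool {
	return mask[j/8]>>(j%8)&1 != 0
}

func main() {
	mask := []byte{0b101} // 3-word object: words 0 and 2 hold pointers
	for j := uint(0); j < 3; j++ {
		fmt.Println(j, wordIsPointer(mask, j))
	}
}
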
diff --git a/src/runtime/chan.go b/src/runtime/chan.go
index 6a0ad35b86..db8ed8c863 100644
--- a/src/runtime/chan.go
+++ b/src/runtime/chan.go
@@ -73,14 +73,14 @@ func makechan(t *chantype, size int) *hchan {
 	elem := t.elem
 
 	// compiler checks this but be safe.
-	if elem.size >= 1<<16 {
+	if elem.Size_ >= 1<<16 {
 		throw("makechan: invalid channel element type")
 	}
-	if hchanSize%maxAlign != 0 || elem.align > maxAlign {
+	if hchanSize%maxAlign != 0 || elem.Align_ > maxAlign {
 		throw("makechan: bad alignment")
 	}
 
-	mem, overflow := math.MulUintptr(elem.size, uintptr(size))
+	mem, overflow := math.MulUintptr(elem.Size_, uintptr(size))
 	if overflow || mem > maxAlloc-hchanSize || size < 0 {
 		panic(plainError("makechan: size out of range"))
 	}
@@ -96,7 +96,7 @@ func makechan(t *chantype, size int) *hchan {
 		c = (*hchan)(mallocgc(hchanSize, nil, true))
 		// Race detector uses this location for synchronization.
 		c.buf = c.raceaddr()
-	case elem.ptrdata == 0:
+	case elem.PtrBytes == 0:
 		// Elements do not contain pointers.
 		// Allocate hchan and buf in one call.
 		c = (*hchan)(mallocgc(hchanSize+mem, nil, true))
@@ -107,13 +107,13 @@ func makechan(t *chantype, size int) *hchan {
 		c.buf = mallocgc(mem, elem, true)
 	}
 
-	c.elemsize = uint16(elem.size)
+	c.elemsize = uint16(elem.Size_)
 	c.elemtype = elem
 	c.dataqsiz = uint(size)
 	lockInit(&c.lock, lockRankHchan)
 
 	if debugChan {
-		print("makechan: chan=", c, "; elemsize=", elem.size, "; dataqsiz=", size, "\n")
+		print("makechan: chan=", c, "; elemsize=", elem.Size_, "; dataqsiz=", size, "\n")
 	}
 	return c
 }
@@ -339,10 +339,10 @@ func sendDirect(t *_type, sg *sudog, src unsafe.Pointer) {
 	// be updated if the destination's stack gets copied (shrunk).
 	// So make sure that no preemption points can happen between read & use.
 	dst := sg.elem
-	typeBitsBulkBarrier(t, uintptr(dst), uintptr(src), t.size)
+	typeBitsBulkBarrier(t, uintptr(dst), uintptr(src), t.Size_)
 	// No need for cgo write barrier checks because dst is always
 	// Go memory.
-	memmove(dst, src, t.size)
+	memmove(dst, src, t.Size_)
 }
 
 func recvDirect(t *_type, sg *sudog, dst unsafe.Pointer) {
@@ -350,8 +350,8 @@ func recvDirect(t *_type, sg *sudog, dst unsafe.Pointer) {
 	// The channel is locked, so src will not move during this
 	// operation.
 	src := sg.elem
-	typeBitsBulkBarrier(t, uintptr(dst), uintptr(src), t.size)
-	memmove(dst, src, t.size)
+	typeBitsBulkBarrier(t, uintptr(dst), uintptr(src), t.Size_)
+	memmove(dst, src, t.Size_)
 }
 
 func closechan(c *hchan) {
diff --git a/src/runtime/checkptr.go b/src/runtime/checkptr.go
index 2d4afd5cf6..3c49645a44 100644
--- a/src/runtime/checkptr.go
+++ b/src/runtime/checkptr.go
@@ -16,13 +16,13 @@ func checkptrAlignment(p unsafe.Pointer, elem *_type, n uintptr) {
 	// Note that we allow unaligned pointers if the types they point to contain
 	// no pointers themselves. See issue 37298.
 	// TODO(mdempsky): What about fieldAlign?
-	if elem.ptrdata != 0 && uintptr(p)&(uintptr(elem.align)-1) != 0 {
+	if elem.PtrBytes != 0 && uintptr(p)&(uintptr(elem.Align_)-1) != 0 {
 		throw("checkptr: misaligned pointer conversion")
 	}
 
 	// Check that (*[n]elem)(p) doesn't straddle multiple heap objects.
 	// TODO(mdempsky): Fix #46938 so we don't need to worry about overflow here.
-	if checkptrStraddles(p, n*elem.size) {
+	if checkptrStraddles(p, n*elem.Size_) {
 		throw("checkptr: converted pointer straddles multiple allocations")
 	}
 }
diff --git a/src/runtime/debuglog.go b/src/runtime/debuglog.go
index b18774e6c0..873f1b45bd 100644
--- a/src/runtime/debuglog.go
+++ b/src/runtime/debuglog.go
@@ -277,7 +277,7 @@ func (l *dlogger) p(x any) *dlogger {
 		l.w.uvarint(0)
 	} else {
 		v := efaceOf(&x)
-		switch v._type.kind & kindMask {
+		switch v._type.Kind_ & kindMask {
 		case kindChan, kindFunc, kindMap, kindPtr, kindUnsafePointer:
 			l.w.uvarint(uint64(uintptr(v.data)))
 		default:
diff --git a/src/runtime/error.go b/src/runtime/error.go
index a211fbf515..933c3cbec3 100644
--- a/src/runtime/error.go
+++ b/src/runtime/error.go
@@ -258,7 +258,7 @@ func printanycustomtype(i any) {
 	eface := efaceOf(&i)
 	typestring := eface._type.string()
 
-	switch eface._type.kind {
+	switch eface._type.Kind_ {
 	case kindString:
 		print(typestring, `("`, *(*string)(eface.data), `")`)
 	case kindBool:
diff --git a/src/runtime/export_debug_test.go b/src/runtime/export_debug_test.go
index 2d8a133409..632b325912 100644
--- a/src/runtime/export_debug_test.go
+++ b/src/runtime/export_debug_test.go
@@ -32,19 +32,19 @@ func InjectDebugCall(gp *g, fn any, regArgs *abi.RegArgs, stackArgs any, tkill f
 	}
 
 	f := efaceOf(&fn)
-	if f._type == nil || f._type.kind&kindMask != kindFunc {
+	if f._type == nil || f._type.Kind_&kindMask != kindFunc {
 		return nil, plainError("fn must be a function")
 	}
 	fv := (*funcval)(f.data)
 
 	a := efaceOf(&stackArgs)
-	if a._type != nil && a._type.kind&kindMask != kindPtr {
+	if a._type != nil && a._type.Kind_&kindMask != kindPtr {
 		return nil, plainError("args must be a pointer or nil")
 	}
 	argp := a.data
 	var argSize uintptr
 	if argp != nil {
-		argSize = (*ptrtype)(unsafe.Pointer(a._type)).elem.size
+		argSize = (*ptrtype)(unsafe.Pointer(a._type)).elem.Size_
 	}
 
 	h := new(debugCallHandler)
diff --git a/src/runtime/export_test.go b/src/runtime/export_test.go
index c7c111ce47..cfd7805b5e 100644
--- a/src/runtime/export_test.go
+++ b/src/runtime/export_test.go
@@ -233,10 +233,10 @@ func BenchSetType(n int, x any) {
 	t := e._type
 	var size uintptr
 	var p unsafe.Pointer
-	switch t.kind & kindMask {
+	switch t.Kind_ & kindMask {
 	case kindPtr:
 		t = (*ptrtype)(unsafe.Pointer(t)).elem
-		size = t.size
+		size = t.Size_
 		p = e.data
 	case kindSlice:
 		slice := *(*struct {
@@ -244,7 +244,7 @@ func BenchSetType(n int, x any) {
 			len, cap uintptr
 		})(e.data)
 		t = (*slicetype)(unsafe.Pointer(t)).elem
-		size = t.size * slice.len
+		size = t.Size_ * slice.len
 		p = slice.ptr
 	}
 	allocSize := roundupsize(size)
@@ -1754,7 +1754,7 @@ func NewUserArena() *UserArena {
 func (a *UserArena) New(out *any) {
 	i := efaceOf(out)
 	typ := i._type
-	if typ.kind&kindMask != kindPtr {
+	if typ.Kind_&kindMask != kindPtr {
 		panic("new result of non-ptr type")
 	}
 	typ = (*ptrtype)(unsafe.Pointer(typ)).elem
diff --git a/src/runtime/heapdump.go b/src/runtime/heapdump.go
index 74107a2095..70fc5fb34a 100644
--- a/src/runtime/heapdump.go
+++ b/src/runtime/heapdump.go
@@ -168,7 +168,7 @@ func dumptype(t *_type) {
 	// If we've definitely serialized the type before,
 	// no need to do it again.
-	b := &typecache[t.hash&(typeCacheBuckets-1)]
+	b := &typecache[t.Hash&(typeCacheBuckets-1)]
 	if t == b.t[0] {
 		return
 	}
@@ -193,7 +193,7 @@ func dumptype(t *_type) {
 	// dump the type
 	dumpint(tagType)
 	dumpint(uint64(uintptr(unsafe.Pointer(t))))
-	dumpint(uint64(t.size))
+	dumpint(uint64(t.Size_))
 	if x := t.uncommon(); x == nil || t.nameOff(x.pkgpath).name() == "" {
 		dumpstr(t.string())
 	} else {
@@ -204,7 +204,7 @@ func dumptype(t *_type) {
 		dwritebyte('.')
 		dwrite(unsafe.Pointer(unsafe.StringData(name)), uintptr(len(name)))
 	}
-	dumpbool(t.kind&kindDirectIface == 0 || t.ptrdata != 0)
+	dumpbool(t.Kind_&kindDirectIface == 0 || t.PtrBytes != 0)
 }
 
 // dump an object.
diff --git a/src/runtime/iface.go b/src/runtime/iface.go
index f8acbf4ca7..a4ce88ee17 100644
--- a/src/runtime/iface.go
+++ b/src/runtime/iface.go
@@ -28,7 +28,7 @@ type itabTableType struct {
 
 func itabHashFunc(inter *interfacetype, typ *_type) uintptr {
 	// compiler has provided some good hash codes for us.
-	return uintptr(inter.typ.hash ^ typ.hash)
+	return uintptr(inter.typ.Hash ^ typ.Hash)
 }
 
 func getitab(inter *interfacetype, typ *_type, canfail bool) *itab {
@@ -37,7 +37,7 @@ func getitab(inter *interfacetype, typ *_type, canfail bool) *itab {
 	}
 
 	// easy case
-	if typ.tflag&tflagUncommon == 0 {
+	if typ.TFlag&abi.TFlagUncommon == 0 {
 		if canfail {
 			return nil
 		}
@@ -323,12 +323,12 @@ func convT(t *_type, v unsafe.Pointer) unsafe.Pointer {
 		raceReadObjectPC(t, v, getcallerpc(), abi.FuncPCABIInternal(convT))
 	}
 	if msanenabled {
-		msanread(v, t.size)
+		msanread(v, t.Size_)
 	}
 	if asanenabled {
-		asanread(v, t.size)
+		asanread(v, t.Size_)
 	}
-	x := mallocgc(t.size, t, true)
+	x := mallocgc(t.Size_, t, true)
 	typedmemmove(t, x, v)
 	return x
 }
@@ -338,14 +338,14 @@ func convTnoptr(t *_type, v unsafe.Pointer) unsafe.Pointer {
 		raceReadObjectPC(t, v, getcallerpc(), abi.FuncPCABIInternal(convTnoptr))
 	}
 	if msanenabled {
-		msanread(v, t.size)
+		msanread(v, t.Size_)
 	}
 	if asanenabled {
-		asanread(v, t.size)
+		asanread(v, t.Size_)
 	}
-	x := mallocgc(t.size, t, false)
-	memmove(x, v, t.size)
+	x := mallocgc(t.Size_, t, false)
+	memmove(x, v, t.Size_)
 	return x
 }
diff --git a/src/runtime/malloc.go b/src/runtime/malloc.go
index b53e10a435..c3a196e496 100644
--- a/src/runtime/malloc.go
+++ b/src/runtime/malloc.go
@@ -1019,7 +1019,7 @@ func mallocgc(size uintptr, typ *_type, needzero bool) unsafe.Pointer {
 	}
 	var span *mspan
 	var x unsafe.Pointer
-	noscan := typ == nil || typ.ptrdata == 0
+	noscan := typ == nil || typ.PtrBytes == 0
 	// In some cases block zeroing can profitably (for latency reduction purposes)
 	// be delayed till preemption is possible; delayedZeroing tracks that state.
 	delayedZeroing := false
@@ -1142,15 +1142,15 @@ func mallocgc(size uintptr, typ *_type, needzero bool) unsafe.Pointer {
 	if !noscan {
 		var scanSize uintptr
 		heapBitsSetType(uintptr(x), size, dataSize, typ)
-		if dataSize > typ.size {
+		if dataSize > typ.Size_ {
 			// Array allocation. If there are any
 			// pointers, GC has to scan to the last
 			// element.
-			if typ.ptrdata != 0 {
-				scanSize = dataSize - typ.size + typ.ptrdata
+			if typ.PtrBytes != 0 {
+				scanSize = dataSize - typ.Size_ + typ.PtrBytes
 			}
 		} else {
-			scanSize = typ.ptrdata
+			scanSize = typ.PtrBytes
 		}
 		c.scanAlloc += scanSize
 	}
@@ -1321,25 +1321,25 @@ func memclrNoHeapPointersChunked(size uintptr, x unsafe.Pointer) {
 // compiler (both frontend and SSA backend) knows the signature
 // of this function.
 func newobject(typ *_type) unsafe.Pointer {
-	return mallocgc(typ.size, typ, true)
+	return mallocgc(typ.Size_, typ, true)
 }
 
 //go:linkname reflect_unsafe_New reflect.unsafe_New
 func reflect_unsafe_New(typ *_type) unsafe.Pointer {
-	return mallocgc(typ.size, typ, true)
+	return mallocgc(typ.Size_, typ, true)
 }
 
 //go:linkname reflectlite_unsafe_New internal/reflectlite.unsafe_New
 func reflectlite_unsafe_New(typ *_type) unsafe.Pointer {
-	return mallocgc(typ.size, typ, true)
+	return mallocgc(typ.Size_, typ, true)
 }
 
 // newarray allocates an array of n elements of type typ.
 func newarray(typ *_type, n int) unsafe.Pointer {
 	if n == 1 {
-		return mallocgc(typ.size, typ, true)
+		return mallocgc(typ.Size_, typ, true)
 	}
-	mem, overflow := math.MulUintptr(typ.size, uintptr(n))
+	mem, overflow := math.MulUintptr(typ.Size_, uintptr(n))
 	if overflow || mem > maxAlloc || n < 0 {
 		panic(plainError("runtime: allocation size out of range"))
 	}
diff --git a/src/runtime/map.go b/src/runtime/map.go
index e98860fe7a..54206ece9b 100644
--- a/src/runtime/map.go
+++ b/src/runtime/map.go
@@ -264,7 +264,7 @@ func (h *hmap) newoverflow(t *maptype, b *bmap) *bmap {
 		ovf = (*bmap)(newobject(t.bucket))
 	}
 	h.incrnoverflow()
-	if t.bucket.ptrdata == 0 {
+	if t.bucket.PtrBytes == 0 {
 		h.createOverflow()
 		*h.extra.overflow = append(*h.extra.overflow, ovf)
 	}
@@ -303,7 +303,7 @@ func makemap_small() *hmap {
 // If h != nil, the map can be created directly in h.
 // If h.buckets != nil, bucket pointed to can be used as the first bucket.
 func makemap(t *maptype, hint int, h *hmap) *hmap {
-	mem, overflow := math.MulUintptr(uintptr(hint), t.bucket.size)
+	mem, overflow := math.MulUintptr(uintptr(hint), t.bucket.Size_)
 	if overflow || mem > maxAlloc {
 		hint = 0
 	}
@@ -353,10 +353,10 @@ func makeBucketArray(t *maptype, b uint8, dirtyalloc unsafe.Pointer) (buckets un
 		// required to insert the median number of elements
 		// used with this value of b.
 		nbuckets += bucketShift(b - 4)
-		sz := t.bucket.size * nbuckets
+		sz := t.bucket.Size_ * nbuckets
 		up := roundupsize(sz)
 		if up != sz {
-			nbuckets = up / t.bucket.size
+			nbuckets = up / t.bucket.Size_
 		}
 	}
 
@@ -367,8 +367,8 @@ func makeBucketArray(t *maptype, b uint8, dirtyalloc unsafe.Pointer) (buckets un
 		// the above newarray(t.bucket, int(nbuckets))
 		// but may not be empty.
 		buckets = dirtyalloc
-		size := t.bucket.size * nbuckets
-		if t.bucket.ptrdata != 0 {
+		size := t.bucket.Size_ * nbuckets
+		if t.bucket.PtrBytes != 0 {
 			memclrHasPointers(buckets, size)
 		} else {
 			memclrNoHeapPointers(buckets, size)
@@ -401,10 +401,10 @@ func mapaccess1(t *maptype, h *hmap, key unsafe.Pointer) unsafe.Pointer {
 		raceReadObjectPC(t.key, key, callerpc, pc)
 	}
 	if msanenabled && h != nil {
-		msanread(key, t.key.size)
+		msanread(key, t.key.Size_)
 	}
 	if asanenabled && h != nil {
-		asanread(key, t.key.size)
+		asanread(key, t.key.Size_)
 	}
 	if h == nil || h.count == 0 {
 		if t.hashMightPanic() {
@@ -442,7 +442,7 @@ bucketloop:
 			if t.indirectkey() {
 				k = *((*unsafe.Pointer)(k))
 			}
-			if t.key.equal(key, k) {
+			if t.key.Equal(key, k) {
 				e := add(unsafe.Pointer(b), dataOffset+bucketCnt*uintptr(t.keysize)+i*uintptr(t.elemsize))
 				if t.indirectelem() {
 					e = *((*unsafe.Pointer)(e))
@@ -462,10 +462,10 @@ func mapaccess2(t *maptype, h *hmap, key unsafe.Pointer) (unsafe.Pointer, bool)
 		raceReadObjectPC(t.key, key, callerpc, pc)
 	}
 	if msanenabled && h != nil {
-		msanread(key, t.key.size)
+		msanread(key, t.key.Size_)
 	}
 	if asanenabled && h != nil {
-		asanread(key, t.key.size)
+		asanread(key, t.key.Size_)
 	}
 	if h == nil || h.count == 0 {
 		if t.hashMightPanic() {
@@ -503,7 +503,7 @@ bucketloop:
 			if t.indirectkey() {
 				k = *((*unsafe.Pointer)(k))
 			}
-			if t.key.equal(key, k) {
+			if t.key.Equal(key, k) {
 				e := add(unsafe.Pointer(b), dataOffset+bucketCnt*uintptr(t.keysize)+i*uintptr(t.elemsize))
 				if t.indirectelem() {
 					e = *((*unsafe.Pointer)(e))
@@ -547,7 +547,7 @@ bucketloop:
 			if t.indirectkey() {
 				k = *((*unsafe.Pointer)(k))
 			}
-			if t.key.equal(key, k) {
+			if t.key.Equal(key, k) {
 				e := add(unsafe.Pointer(b), dataOffset+bucketCnt*uintptr(t.keysize)+i*uintptr(t.elemsize))
 				if t.indirectelem() {
 					e = *((*unsafe.Pointer)(e))
@@ -587,10 +587,10 @@ func mapassign(t *maptype, h *hmap, key unsafe.Pointer) unsafe.Pointer {
 		raceReadObjectPC(t.key, key, callerpc, pc)
 	}
 	if msanenabled {
-		msanread(key, t.key.size)
+		msanread(key, t.key.Size_)
 	}
 	if asanenabled {
-		asanread(key, t.key.size)
+		asanread(key, t.key.Size_)
 	}
 	if h.flags&hashWriting != 0 {
 		fatal("concurrent map writes")
@@ -634,7 +634,7 @@ bucketloop:
 			if t.indirectkey() {
 				k = *((*unsafe.Pointer)(k))
 			}
-			if !t.key.equal(key, k) {
+			if !t.key.Equal(key, k) {
 				continue
 			}
 			// already have a mapping for key. Update it.
@@ -701,10 +701,10 @@ func mapdelete(t *maptype, h *hmap, key unsafe.Pointer) {
 		raceReadObjectPC(t.key, key, callerpc, pc)
 	}
 	if msanenabled && h != nil {
-		msanread(key, t.key.size)
+		msanread(key, t.key.Size_)
 	}
 	if asanenabled && h != nil {
-		asanread(key, t.key.size)
+		asanread(key, t.key.Size_)
 	}
 	if h == nil || h.count == 0 {
 		if t.hashMightPanic() {
@@ -743,22 +743,22 @@ search:
 			if t.indirectkey() {
 				k2 = *((*unsafe.Pointer)(k2))
 			}
-			if !t.key.equal(key, k2) {
+			if !t.key.Equal(key, k2) {
 				continue
 			}
 			// Only clear key if there are pointers in it.
 			if t.indirectkey() {
 				*(*unsafe.Pointer)(k) = nil
-			} else if t.key.ptrdata != 0 {
-				memclrHasPointers(k, t.key.size)
+			} else if t.key.PtrBytes != 0 {
+				memclrHasPointers(k, t.key.Size_)
 			}
 			e := add(unsafe.Pointer(b), dataOffset+bucketCnt*uintptr(t.keysize)+i*uintptr(t.elemsize))
 			if t.indirectelem() {
 				*(*unsafe.Pointer)(e) = nil
-			} else if t.elem.ptrdata != 0 {
-				memclrHasPointers(e, t.elem.size)
+			} else if t.elem.PtrBytes != 0 {
+				memclrHasPointers(e, t.elem.Size_)
 			} else {
-				memclrNoHeapPointers(e, t.elem.size)
+				memclrNoHeapPointers(e, t.elem.Size_)
 			}
 			b.tophash[i] = emptyOne
 			// If the bucket now ends in a bunch of emptyOne states,
@@ -832,7 +832,7 @@ func mapiterinit(t *maptype, h *hmap, it *hiter) {
 	// grab snapshot of bucket state
 	it.B = h.B
 	it.buckets = h.buckets
-	if t.bucket.ptrdata == 0 {
+	if t.bucket.PtrBytes == 0 {
 		// Allocate the current slice and remember pointers to both current and old.
 		// This preserves all relevant overflow buckets alive even if
 		// the table grows and/or overflow buckets are added to the table
@@ -931,7 +931,7 @@ next:
 			// through the oldbucket, skipping any keys that will go
 			// to the other new bucket (each oldbucket expands to two
 			// buckets during a grow).
-			if t.reflexivekey() || t.key.equal(k, k) {
+			if t.reflexivekey() || t.key.Equal(k, k) {
 				// If the item in the oldbucket is not destined for
 				// the current new bucket in the iteration, skip it.
 				hash := t.hasher(k, uintptr(h.hash0))
@@ -952,7 +952,7 @@ next:
 			}
 		}
 		if (b.tophash[offi] != evacuatedX && b.tophash[offi] != evacuatedY) ||
-			!(t.reflexivekey() || t.key.equal(k, k)) {
+			!(t.reflexivekey() || t.key.Equal(k, k)) {
 			// This is the golden data, we can return it.
 			// OR
 			// key!=key, so the entry can't be deleted or updated, so we can just return it.
@@ -1210,7 +1210,7 @@ func evacuate(t *maptype, h *hmap, oldbucket uintptr) {
 				// Compute hash to make our evacuation decision (whether we need
 				// to send this key/elem to bucket x or bucket y).
 				hash := t.hasher(k2, uintptr(h.hash0))
-				if h.flags&iterator != 0 && !t.reflexivekey() && !t.key.equal(k2, k2) {
+				if h.flags&iterator != 0 && !t.reflexivekey() && !t.key.Equal(k2, k2) {
 					// If key != key (NaNs), then the hash could be (and probably
 					// will be) entirely different from the old hash. Moreover,
 					// it isn't reproducible. Reproducibility is required in the
@@ -1265,7 +1265,7 @@ func evacuate(t *maptype, h *hmap, oldbucket uintptr) {
 		}
 	}
 	// Unlink the overflow buckets & clear key/elem to help GC.
-	if h.flags&oldIterator == 0 && t.bucket.ptrdata != 0 {
+	if h.flags&oldIterator == 0 && t.bucket.PtrBytes != 0 {
 		b := add(h.oldbuckets, oldbucket*uintptr(t.bucketsize))
 		// Preserve b.tophash because the evacuation
 		// state is maintained there.
@@ -1309,36 +1309,36 @@ func advanceEvacuationMark(h *hmap, t *maptype, newbit uintptr) {
 //go:linkname reflect_makemap reflect.makemap
 func reflect_makemap(t *maptype, cap int) *hmap {
 	// Check invariants and reflects math.
-	if t.key.equal == nil {
+	if t.key.Equal == nil {
 		throw("runtime.reflect_makemap: unsupported map key type")
 	}
-	if t.key.size > maxKeySize && (!t.indirectkey() || t.keysize != uint8(goarch.PtrSize)) ||
-		t.key.size <= maxKeySize && (t.indirectkey() || t.keysize != uint8(t.key.size)) {
+	if t.key.Size_ > maxKeySize && (!t.indirectkey() || t.keysize != uint8(goarch.PtrSize)) ||
+		t.key.Size_ <= maxKeySize && (t.indirectkey() || t.keysize != uint8(t.key.Size_)) {
 		throw("key size wrong")
 	}
-	if t.elem.size > maxElemSize && (!t.indirectelem() || t.elemsize != uint8(goarch.PtrSize)) ||
-		t.elem.size <= maxElemSize && (t.indirectelem() || t.elemsize != uint8(t.elem.size)) {
+	if t.elem.Size_ > maxElemSize && (!t.indirectelem() || t.elemsize != uint8(goarch.PtrSize)) ||
+		t.elem.Size_ <= maxElemSize && (t.indirectelem() || t.elemsize != uint8(t.elem.Size_)) {
 		throw("elem size wrong")
 	}
-	if t.key.align > bucketCnt {
+	if t.key.Align_ > bucketCnt {
 		throw("key align too big")
 	}
-	if t.elem.align > bucketCnt {
+	if t.elem.Align_ > bucketCnt {
 		throw("elem align too big")
 	}
-	if t.key.size%uintptr(t.key.align) != 0 {
+	if t.key.Size_%uintptr(t.key.Align_) != 0 {
 		throw("key size not a multiple of key align")
 	}
-	if t.elem.size%uintptr(t.elem.align) != 0 {
+	if t.elem.Size_%uintptr(t.elem.Align_) != 0 {
 		throw("elem size not a multiple of elem align")
 	}
 	if bucketCnt < 8 {
 		throw("bucketsize too small for proper alignment")
 	}
-	if dataOffset%uintptr(t.key.align) != 0 {
+	if dataOffset%uintptr(t.key.Align_) != 0 {
 		throw("need padding in bucket (key)")
 	}
-	if dataOffset%uintptr(t.elem.align) != 0 {
+	if dataOffset%uintptr(t.elem.Align_) != 0 {
 		throw("need padding in bucket (elem)")
 	}
diff --git a/src/runtime/map_fast32.go b/src/runtime/map_fast32.go
index 01ea330950..01a10f520b 100644
--- a/src/runtime/map_fast32.go
+++ b/src/runtime/map_fast32.go
@@ -302,16 +302,16 @@ search:
 			// Only clear key if there are pointers in it.
 			// This can only happen if pointers are 32 bit
 			// wide as 64 bit pointers do not fit into a 32 bit key.
-			if goarch.PtrSize == 4 && t.key.ptrdata != 0 {
+			if goarch.PtrSize == 4 && t.key.PtrBytes != 0 {
 				// The key must be a pointer as we checked pointers are
 				// 32 bits wide and the key is 32 bits wide also.
 				*(*unsafe.Pointer)(k) = nil
 			}
 			e := add(unsafe.Pointer(b), dataOffset+bucketCnt*4+i*uintptr(t.elemsize))
-			if t.elem.ptrdata != 0 {
-				memclrHasPointers(e, t.elem.size)
+			if t.elem.PtrBytes != 0 {
+				memclrHasPointers(e, t.elem.Size_)
 			} else {
-				memclrNoHeapPointers(e, t.elem.size)
+				memclrNoHeapPointers(e, t.elem.Size_)
 			}
 			b.tophash[i] = emptyOne
 			// If the bucket now ends in a bunch of emptyOne states,
@@ -428,7 +428,7 @@ func evacuate_fast32(t *maptype, h *hmap, oldbucket uintptr) {
 				dst.b.tophash[dst.i&(bucketCnt-1)] = top // mask dst.i as an optimization, to avoid a bounds check
 
 				// Copy key.
-				if goarch.PtrSize == 4 && t.key.ptrdata != 0 && writeBarrier.enabled {
+				if goarch.PtrSize == 4 && t.key.PtrBytes != 0 && writeBarrier.enabled {
 					// Write with a write barrier.
 					*(*unsafe.Pointer)(dst.k) = *(*unsafe.Pointer)(k)
 				} else {
@@ -446,7 +446,7 @@ func evacuate_fast32(t *maptype, h *hmap, oldbucket uintptr) {
 		}
 	}
 	// Unlink the overflow buckets & clear key/elem to help GC.
-	if h.flags&oldIterator == 0 && t.bucket.ptrdata != 0 {
+	if h.flags&oldIterator == 0 && t.bucket.PtrBytes != 0 {
 		b := add(h.oldbuckets, oldbucket*uintptr(t.bucketsize))
 		// Preserve b.tophash because the evacuation
 		// state is maintained there.
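
The t.key.Equal(k, k) checks in the map.go hunks above exist because map key equality need not be reflexive: NaN is the canonical case, and the reflexivekey()/Equal(k, k) branches keep iteration and evacuation correct for such keys. The user-visible behavior they preserve:

package main

import (
	"fmt"
	"math"
)

func main() {
	m := map[float64]int{}
	nan := math.NaN()
	m[nan] = 1
	m[nan] = 2 // NaN != NaN, so this inserts a second, distinct entry
	fmt.Println(len(m)) // 2
	_, ok := m[nan]     // and no NaN key can ever be looked up again
	fmt.Println(ok)     // false
}
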
diff --git a/src/runtime/map_fast64.go b/src/runtime/map_fast64.go
index 2967360b76..0dd90183a4 100644
--- a/src/runtime/map_fast64.go
+++ b/src/runtime/map_fast64.go
@@ -300,7 +300,7 @@ search:
 				continue
 			}
 			// Only clear key if there are pointers in it.
-			if t.key.ptrdata != 0 {
+			if t.key.PtrBytes != 0 {
 				if goarch.PtrSize == 8 {
 					*(*unsafe.Pointer)(k) = nil
 				} else {
@@ -310,10 +310,10 @@ search:
 				}
 			}
 			e := add(unsafe.Pointer(b), dataOffset+bucketCnt*8+i*uintptr(t.elemsize))
-			if t.elem.ptrdata != 0 {
-				memclrHasPointers(e, t.elem.size)
+			if t.elem.PtrBytes != 0 {
+				memclrHasPointers(e, t.elem.Size_)
 			} else {
-				memclrNoHeapPointers(e, t.elem.size)
+				memclrNoHeapPointers(e, t.elem.Size_)
 			}
 			b.tophash[i] = emptyOne
 			// If the bucket now ends in a bunch of emptyOne states,
@@ -430,7 +430,7 @@ func evacuate_fast64(t *maptype, h *hmap, oldbucket uintptr) {
 				dst.b.tophash[dst.i&(bucketCnt-1)] = top // mask dst.i as an optimization, to avoid a bounds check
 
 				// Copy key.
-				if t.key.ptrdata != 0 && writeBarrier.enabled {
+				if t.key.PtrBytes != 0 && writeBarrier.enabled {
 					if goarch.PtrSize == 8 {
 						// Write with a write barrier.
 						*(*unsafe.Pointer)(dst.k) = *(*unsafe.Pointer)(k)
@@ -454,7 +454,7 @@ func evacuate_fast64(t *maptype, h *hmap, oldbucket uintptr) {
 		}
 	}
 	// Unlink the overflow buckets & clear key/elem to help GC.
-	if h.flags&oldIterator == 0 && t.bucket.ptrdata != 0 {
+	if h.flags&oldIterator == 0 && t.bucket.PtrBytes != 0 {
 		b := add(h.oldbuckets, oldbucket*uintptr(t.bucketsize))
 		// Preserve b.tophash because the evacuation
 		// state is maintained there.
diff --git a/src/runtime/map_faststr.go b/src/runtime/map_faststr.go
index 006c24cee2..03a4fac169 100644
--- a/src/runtime/map_faststr.go
+++ b/src/runtime/map_faststr.go
@@ -336,10 +336,10 @@ search:
 			// Clear key's pointer.
 			k.str = nil
 			e := add(unsafe.Pointer(b), dataOffset+bucketCnt*2*goarch.PtrSize+i*uintptr(t.elemsize))
-			if t.elem.ptrdata != 0 {
-				memclrHasPointers(e, t.elem.size)
+			if t.elem.PtrBytes != 0 {
+				memclrHasPointers(e, t.elem.Size_)
 			} else {
-				memclrNoHeapPointers(e, t.elem.size)
+				memclrNoHeapPointers(e, t.elem.Size_)
 			}
 			b.tophash[i] = emptyOne
 			// If the bucket now ends in a bunch of emptyOne states,
@@ -469,7 +469,7 @@ func evacuate_faststr(t *maptype, h *hmap, oldbucket uintptr) {
 		}
 	}
 	// Unlink the overflow buckets & clear key/elem to help GC.
-	if h.flags&oldIterator == 0 && t.bucket.ptrdata != 0 {
+	if h.flags&oldIterator == 0 && t.bucket.PtrBytes != 0 {
 		b := add(h.oldbuckets, oldbucket*uintptr(t.bucketsize))
 		// Preserve b.tophash because the evacuation
 		// state is maintained there.
diff --git a/src/runtime/mbarrier.go b/src/runtime/mbarrier.go
index e367d8f524..ed6df2d55e 100644
--- a/src/runtime/mbarrier.go
+++ b/src/runtime/mbarrier.go
@@ -159,8 +159,8 @@ func typedmemmove(typ *_type, dst, src unsafe.Pointer) {
 	if dst == src {
 		return
 	}
-	if writeBarrier.needed && typ.ptrdata != 0 {
-		bulkBarrierPreWrite(uintptr(dst), uintptr(src), typ.ptrdata)
+	if writeBarrier.needed && typ.PtrBytes != 0 {
+		bulkBarrierPreWrite(uintptr(dst), uintptr(src), typ.PtrBytes)
 	}
 	// There's a race here: if some other goroutine can write to
 	// src, it may change some pointer in src after we've
@@ -169,9 +169,9 @@ func typedmemmove(typ *_type, dst, src unsafe.Pointer) {
 	// other goroutine must also be accompanied by a write
 	// barrier, so at worst we've unnecessarily greyed the old
 	// pointer that was in src.
-	memmove(dst, src, typ.size)
+	memmove(dst, src, typ.Size_)
 	if goexperiment.CgoCheck2 {
-		cgoCheckMemmove2(typ, dst, src, 0, typ.size)
+		cgoCheckMemmove2(typ, dst, src, 0, typ.Size_)
 	}
 }
 
@@ -182,7 +182,7 @@ func typedmemmove(typ *_type, dst, src unsafe.Pointer) {
 //go:nowritebarrierrec
 //go:nosplit
 func wbZero(typ *_type, dst unsafe.Pointer) {
-	bulkBarrierPreWrite(uintptr(dst), 0, typ.ptrdata)
+	bulkBarrierPreWrite(uintptr(dst), 0, typ.PtrBytes)
 }
 
 // wbMove performs the write barrier operations necessary before
@@ -192,7 +192,7 @@ func wbZero(typ *_type, dst unsafe.Pointer) {
 //go:nowritebarrierrec
 //go:nosplit
 func wbMove(typ *_type, dst, src unsafe.Pointer) {
-	bulkBarrierPreWrite(uintptr(dst), uintptr(src), typ.ptrdata)
+	bulkBarrierPreWrite(uintptr(dst), uintptr(src), typ.PtrBytes)
 }
 
 //go:linkname reflect_typedmemmove reflect.typedmemmove
@@ -202,12 +202,12 @@ func reflect_typedmemmove(typ *_type, dst, src unsafe.Pointer) {
 		raceReadObjectPC(typ, src, getcallerpc(), abi.FuncPCABIInternal(reflect_typedmemmove))
 	}
 	if msanenabled {
-		msanwrite(dst, typ.size)
-		msanread(src, typ.size)
+		msanwrite(dst, typ.Size_)
+		msanread(src, typ.Size_)
 	}
 	if asanenabled {
-		asanwrite(dst, typ.size)
-		asanread(src, typ.size)
+		asanwrite(dst, typ.Size_)
+		asanread(src, typ.Size_)
 	}
 	typedmemmove(typ, dst, src)
 }
@@ -228,7 +228,7 @@ func reflectlite_typedmemmove(typ *_type, dst, src unsafe.Pointer) {
 //
 //go:nosplit
 func reflectcallmove(typ *_type, dst, src unsafe.Pointer, size uintptr, regs *abi.RegArgs) {
-	if writeBarrier.needed && typ != nil && typ.ptrdata != 0 && size >= goarch.PtrSize {
+	if writeBarrier.needed && typ != nil && typ.PtrBytes != 0 && size >= goarch.PtrSize {
 		bulkBarrierPreWrite(uintptr(dst), uintptr(src), size)
 	}
 	memmove(dst, src, size)
@@ -258,16 +258,16 @@ func typedslicecopy(typ *_type, dstPtr unsafe.Pointer, dstLen int, srcPtr unsafe
 	if raceenabled {
 		callerpc := getcallerpc()
 		pc := abi.FuncPCABIInternal(slicecopy)
-		racewriterangepc(dstPtr, uintptr(n)*typ.size, callerpc, pc)
-		racereadrangepc(srcPtr, uintptr(n)*typ.size, callerpc, pc)
+		racewriterangepc(dstPtr, uintptr(n)*typ.Size_, callerpc, pc)
+		racereadrangepc(srcPtr, uintptr(n)*typ.Size_, callerpc, pc)
 	}
 	if msanenabled {
-		msanwrite(dstPtr, uintptr(n)*typ.size)
-		msanread(srcPtr, uintptr(n)*typ.size)
+		msanwrite(dstPtr, uintptr(n)*typ.Size_)
+		msanread(srcPtr, uintptr(n)*typ.Size_)
 	}
 	if asanenabled {
-		asanwrite(dstPtr, uintptr(n)*typ.size)
-		asanread(srcPtr, uintptr(n)*typ.size)
+		asanwrite(dstPtr, uintptr(n)*typ.Size_)
+		asanread(srcPtr, uintptr(n)*typ.Size_)
 	}
 
 	if goexperiment.CgoCheck2 {
@@ -278,13 +278,13 @@ func typedslicecopy(typ *_type, dstPtr unsafe.Pointer, dstLen int, srcPtr unsafe
 		return n
 	}
 
-	// Note: No point in checking typ.ptrdata here:
+	// Note: No point in checking typ.PtrBytes here:
 	// compiler only emits calls to typedslicecopy for types with pointers,
 	// and growslice and reflect_typedslicecopy check for pointers
 	// before calling typedslicecopy.
-	size := uintptr(n) * typ.size
+	size := uintptr(n) * typ.Size_
 	if writeBarrier.needed {
-		pwsize := size - typ.size + typ.ptrdata
+		pwsize := size - typ.Size_ + typ.PtrBytes
 		bulkBarrierPreWrite(uintptr(dstPtr), uintptr(srcPtr), pwsize)
 	}
 	// See typedmemmove for a discussion of the race between the
@@ -295,8 +295,8 @@ func typedslicecopy(typ *_type, dstPtr unsafe.Pointer, dstLen int, srcPtr unsafe
 
 //go:linkname reflect_typedslicecopy reflect.typedslicecopy
 func reflect_typedslicecopy(elemType *_type, dst, src slice) int {
-	if elemType.ptrdata == 0 {
-		return slicecopy(dst.array, dst.len, src.array, src.len, elemType.size)
+	if elemType.PtrBytes == 0 {
+		return slicecopy(dst.array, dst.len, src.array, src.len, elemType.Size_)
 	}
 	return typedslicecopy(elemType, dst.array, dst.len, src.array, src.len)
 }
@@ -313,10 +313,10 @@ func reflect_typedslicecopy(elemType *_type, dst, src slice) int {
 //
 //go:nosplit
 func typedmemclr(typ *_type, ptr unsafe.Pointer) {
-	if writeBarrier.needed && typ.ptrdata != 0 {
-		bulkBarrierPreWrite(uintptr(ptr), 0, typ.ptrdata)
+	if writeBarrier.needed && typ.PtrBytes != 0 {
+		bulkBarrierPreWrite(uintptr(ptr), 0, typ.PtrBytes)
 	}
-	memclrNoHeapPointers(ptr, typ.size)
+	memclrNoHeapPointers(ptr, typ.Size_)
 }
 
 //go:linkname reflect_typedmemclr reflect.typedmemclr
@@ -326,7 +326,7 @@ func reflect_typedmemclr(typ *_type, ptr unsafe.Pointer) {
 
 //go:linkname reflect_typedmemclrpartial reflect.typedmemclrpartial
 func reflect_typedmemclrpartial(typ *_type, ptr unsafe.Pointer, off, size uintptr) {
-	if writeBarrier.needed && typ.ptrdata != 0 {
+	if writeBarrier.needed && typ.PtrBytes != 0 {
 		bulkBarrierPreWrite(uintptr(ptr), 0, size)
 	}
 	memclrNoHeapPointers(ptr, size)
@@ -334,8 +334,8 @@ func reflect_typedmemclrpartial(typ *_type, ptr unsafe.Pointer, off, size uintpt
 
 //go:linkname reflect_typedarrayclear reflect.typedarrayclear
 func reflect_typedarrayclear(typ *_type, ptr unsafe.Pointer, len int) {
-	size := typ.size * uintptr(len)
-	if writeBarrier.needed && typ.ptrdata != 0 {
+	size := typ.Size_ * uintptr(len)
+	if writeBarrier.needed && typ.PtrBytes != 0 {
 		bulkBarrierPreWrite(uintptr(ptr), 0, size)
 	}
 	memclrNoHeapPointers(ptr, size)
@@ -343,7 +343,7 @@ func reflect_typedarrayclear(typ *_type, ptr unsafe.Pointer, len int) {
 
 // memclrHasPointers clears n bytes of typed memory starting at ptr.
 // The caller must ensure that the type of the object at ptr has
-// pointers, usually by checking typ.ptrdata. However, ptr
+// pointers, usually by checking typ.PtrBytes. However, ptr
 // does not have to point to the start of the allocation.
 //
 //go:nosplit
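
typedslicecopy's pwsize above and mallocgc's scanSize in malloc.go compute the same quantity: for n contiguous elements, everything past the last element's pointer prefix is pointer-free, so only (n-1)*Size_ + PtrBytes bytes matter to the barrier or scanner. A worked sketch of the arithmetic (the function name is invented for illustration):

package main

import "fmt"

// pointerSpan mirrors pwsize := size - typ.Size_ + typ.PtrBytes:
// n*size total bytes, minus the pointer-free tail of the final element.
func pointerSpan(n, size, ptrBytes uintptr) uintptr {
	return n*size - size + ptrBytes // == (n-1)*size + ptrBytes
}

func main() {
	// 4 elements, 32 bytes each, pointers only in the first 8 bytes:
	fmt.Println(pointerSpan(4, 32, 8)) // 104, not the full 128
}
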
diff --git a/src/runtime/mbitmap.go b/src/runtime/mbitmap.go
index ac20bd9ade..f8ce5fd006 100644
--- a/src/runtime/mbitmap.go
+++ b/src/runtime/mbitmap.go
@@ -526,7 +526,7 @@ func (h heapBits) nextFast() (heapBits, uintptr) {
 // The pointer bitmap is not maintained for allocations containing
 // no pointers at all; any caller of bulkBarrierPreWrite must first
 // make sure the underlying allocation contains pointers, usually
-// by checking typ.ptrdata.
+// by checking typ.PtrBytes.
 //
 // Callers must perform cgo checks if goexperiment.CgoCheck2.
 //
@@ -682,21 +682,21 @@ func typeBitsBulkBarrier(typ *_type, dst, src, size uintptr) {
 	if typ == nil {
 		throw("runtime: typeBitsBulkBarrier without type")
 	}
-	if typ.size != size {
-		println("runtime: typeBitsBulkBarrier with type ", typ.string(), " of size ", typ.size, " but memory size", size)
+	if typ.Size_ != size {
+		println("runtime: typeBitsBulkBarrier with type ", typ.string(), " of size ", typ.Size_, " but memory size", size)
 		throw("runtime: invalid typeBitsBulkBarrier")
 	}
-	if typ.kind&kindGCProg != 0 {
+	if typ.Kind_&kindGCProg != 0 {
 		println("runtime: typeBitsBulkBarrier with type ", typ.string(), " with GC prog")
 		throw("runtime: invalid typeBitsBulkBarrier")
 	}
 	if !writeBarrier.needed {
 		return
 	}
-	ptrmask := typ.gcdata
+	ptrmask := typ.GCData
 	buf := &getg().m.p.ptr().wbBuf
 	var bits uint32
-	for i := uintptr(0); i < typ.ptrdata; i += goarch.PtrSize {
+	for i := uintptr(0); i < typ.PtrBytes; i += goarch.PtrSize {
 		if i&(goarch.PtrSize*8-1) == 0 {
 			bits = uint32(*ptrmask)
 			ptrmask = addb(ptrmask, 1)
@@ -915,7 +915,7 @@ func readUintptr(p *byte) uintptr {
 
 // heapBitsSetType records that the new allocation [x, x+size)
 // holds in [x, x+dataSize) one or more values of type typ.
-// (The number of values is given by dataSize / typ.size.)
+// (The number of values is given by dataSize / typ.Size.)
 // If dataSize < size, the fragment [x+dataSize, x+size) is
 // recorded as non-pointer data.
 // It is known that the type has pointers somewhere;
@@ -939,8 +939,8 @@ func readUintptr(p *byte) uintptr {
 func heapBitsSetType(x, size, dataSize uintptr, typ *_type) {
 	const doubleCheck = false // slow but helpful; enable to test modifications to this code
 
-	if doubleCheck && dataSize%typ.size != 0 {
-		throw("heapBitsSetType: dataSize not a multiple of typ.size")
+	if doubleCheck && dataSize%typ.Size_ != 0 {
+		throw("heapBitsSetType: dataSize not a multiple of typ.Size")
 	}
 
 	if goarch.PtrSize == 8 && size == goarch.PtrSize {
@@ -965,12 +965,12 @@ func heapBitsSetType(x, size, dataSize uintptr, typ *_type) {
 	h := writeHeapBitsForAddr(x)
 
 	// Handle GC program.
-	if typ.kind&kindGCProg != 0 {
+	if typ.Kind_&kindGCProg != 0 {
 		// Expand the gc program into the storage we're going to use for the actual object.
 		obj := (*uint8)(unsafe.Pointer(x))
-		n := runGCProg(addb(typ.gcdata, 4), obj)
+		n := runGCProg(addb(typ.GCData, 4), obj)
 		// Use the expanded program to set the heap bits.
-		for i := uintptr(0); true; i += typ.size {
+		for i := uintptr(0); true; i += typ.Size_ {
 			// Copy expanded program to heap bitmap.
 			p := obj
 			j := n
@@ -981,12 +981,12 @@ func heapBitsSetType(x, size, dataSize uintptr, typ *_type) {
 			}
 			h = h.write(uintptr(*p), j)
 
-			if i+typ.size == dataSize {
+			if i+typ.Size_ == dataSize {
 				break // no padding after last element
 			}
 
 			// Pad with zeros to the start of the next element.
-			h = h.pad(typ.size - n*goarch.PtrSize)
+			h = h.pad(typ.Size_ - n*goarch.PtrSize)
 		}
 
 		h.flush(x, size)
@@ -998,16 +998,16 @@ func heapBitsSetType(x, size, dataSize uintptr, typ *_type) {
 
 	// Note about sizes:
 	//
-	// typ.size is the number of words in the object,
-	// and typ.ptrdata is the number of words in the prefix
+	// typ.Size is the number of words in the object,
+	// and typ.PtrBytes is the number of words in the prefix
 	// of the object that contains pointers. That is, the final
-	// typ.size - typ.ptrdata words contain no pointers.
+	// typ.Size - typ.PtrBytes words contain no pointers.
 	// This allows optimization of a common pattern where
 	// an object has a small header followed by a large scalar
 	// buffer. If we know the pointers are over, we don't have
 	// to scan the buffer's heap bitmap at all.
 	// The 1-bit ptrmasks are sized to contain only bits for
-	// the typ.ptrdata prefix, zero padded out to a full byte
+	// the typ.PtrBytes prefix, zero padded out to a full byte
 	// of bitmap. If there is more room in the allocated object,
 	// that space is pointerless. The noMorePtrs bitmap will prevent
 	// scanning large pointerless tails of an object.
@@ -1016,13 +1016,13 @@ func heapBitsSetType(x, size, dataSize uintptr, typ *_type) {
 	// objects with scalar tails, all but the last tail does have to
 	// be initialized, because there is no way to say "skip forward".
 
-	ptrs := typ.ptrdata / goarch.PtrSize
-	if typ.size == dataSize { // Single element
+	ptrs := typ.PtrBytes / goarch.PtrSize
+	if typ.Size_ == dataSize { // Single element
 		if ptrs <= ptrBits { // Single small element
-			m := readUintptr(typ.gcdata)
+			m := readUintptr(typ.GCData)
 			h = h.write(m, ptrs)
 		} else { // Single large element
-			p := typ.gcdata
+			p := typ.GCData
 			for {
 				h = h.write(readUintptr(p), ptrBits)
 				p = addb(p, ptrBits/8)
@@ -1035,10 +1035,10 @@ func heapBitsSetType(x, size, dataSize uintptr, typ *_type) {
 			h = h.write(m, ptrs)
 		}
 	} else { // Repeated element
-		words := typ.size / goarch.PtrSize // total words, including scalar tail
+		words := typ.Size_ / goarch.PtrSize // total words, including scalar tail
 		if words <= ptrBits { // Repeated small element
-			n := dataSize / typ.size
-			m := readUintptr(typ.gcdata)
+			n := dataSize / typ.Size_
+			m := readUintptr(typ.GCData)
 			// Make larger unit to repeat
 			for words <= ptrBits/2 {
 				if n&1 != 0 {
@@ -1058,8 +1058,8 @@ func heapBitsSetType(x, size, dataSize uintptr, typ *_type) {
 			}
 			h = h.write(m, ptrs)
 		} else { // Repeated large element
-			for i := uintptr(0); true; i += typ.size {
-				p := typ.gcdata
+			for i := uintptr(0); true; i += typ.Size_ {
+				p := typ.GCData
 				j := ptrs
 				for j > ptrBits {
 					h = h.write(readUintptr(p), ptrBits)
@@ -1068,11 +1068,11 @@ func heapBitsSetType(x, size, dataSize uintptr, typ *_type) {
 				}
 				m := readUintptr(p)
 				h = h.write(m, j)
-				if i+typ.size == dataSize {
+				if i+typ.Size_ == dataSize {
 					break // don't need the trailing nonptr bits on the last element.
 				}
 				// Pad with zeros to the start of the next element.
-				h = h.pad(typ.size - typ.ptrdata)
+				h = h.pad(typ.Size_ - typ.PtrBytes)
 			}
 		}
 	}
@@ -1084,10 +1084,10 @@ func heapBitsSetType(x, size, dataSize uintptr, typ *_type) {
 			// Compute the pointer bit we want at offset i.
 			want := false
 			if i < dataSize {
-				off := i % typ.size
-				if off < typ.ptrdata {
+				off := i % typ.Size_
+				if off < typ.PtrBytes {
 					j := off / goarch.PtrSize
-					want = *addb(typ.gcdata, j/8)>>(j%8)&1 != 0
+					want = *addb(typ.GCData, j/8)>>(j%8)&1 != 0
 				}
 			}
 			if want {
@@ -1417,7 +1417,7 @@ func getgcmask(ep any) (mask []byte) {
 	// data
 	if datap.data <= uintptr(p) && uintptr(p) < datap.edata {
 		bitmap := datap.gcdatamask.bytedata
-		n := (*ptrtype)(unsafe.Pointer(t)).elem.size
+		n := (*ptrtype)(unsafe.Pointer(t)).elem.Size_
 		mask = make([]byte, n/goarch.PtrSize)
 		for i := uintptr(0); i < n; i += goarch.PtrSize {
 			off := (uintptr(p) + i - datap.data) / goarch.PtrSize
@@ -1429,7 +1429,7 @@ func getgcmask(ep any) (mask []byte) {
 	// bss
 	if datap.bss <= uintptr(p) && uintptr(p) < datap.ebss {
 		bitmap := datap.gcbssmask.bytedata
-		n := (*ptrtype)(unsafe.Pointer(t)).elem.size
+		n := (*ptrtype)(unsafe.Pointer(t)).elem.Size_
 		mask = make([]byte, n/goarch.PtrSize)
 		for i := uintptr(0); i < n; i += goarch.PtrSize {
 			off := (uintptr(p) + i - datap.bss) / goarch.PtrSize
@@ -1477,7 +1477,7 @@ func getgcmask(ep any) (mask []byte) {
 			return
 		}
 		size := uintptr(locals.n) * goarch.PtrSize
-		n := (*ptrtype)(unsafe.Pointer(t)).elem.size
+		n := (*ptrtype)(unsafe.Pointer(t)).elem.Size_
 		mask = make([]byte, n/goarch.PtrSize)
 		for i := uintptr(0); i < n; i += goarch.PtrSize {
 			off := (uintptr(p) + i - u.frame.varp + size) / goarch.PtrSize
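
heapBitsSetType's "Repeated small element" path above replicates one element's pointer mask across all elements before a single h.write; the diff shows its doubling loop only in part. A naive (linear, not doubling) sketch of the replication it performs, under the assumption that each element spans `words` bits of the mask:

package main

import "fmt"

// replicate places the per-element mask m every `words` bits, n times.
// The real code doubles the repeat unit instead of looping linearly,
// and must also respect the ptrBits limit of a single bitmap write.
func replicate(m uint64, words, n uint) uint64 {
	var out uint64
	for i := uint(0); i < n; i++ {
		out |= m << (i * words)
	}
	return out
}

func main() {
	// 3 elements of 2 words each, pointer in the first word (mask 0b01):
	fmt.Printf("%06b\n", replicate(0b01, 2, 3)) // 010101
}
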
diff --git a/src/runtime/mfinal.go b/src/runtime/mfinal.go
index d4d4f1f302..e25c06bcf8 100644
--- a/src/runtime/mfinal.go
+++ b/src/runtime/mfinal.go
@@ -234,7 +234,7 @@ func runfinq() {
 					// confusing the write barrier.
 					*(*[2]uintptr)(frame) = [2]uintptr{}
 				}
-				switch f.fint.kind & kindMask {
+				switch f.fint.Kind_ & kindMask {
 				case kindPtr:
 					// direct use of pointer
 					*(*unsafe.Pointer)(r) = f.arg
@@ -371,7 +371,7 @@ func SetFinalizer(obj any, finalizer any) {
 	if etyp == nil {
 		throw("runtime.SetFinalizer: first argument is nil")
 	}
-	if etyp.kind&kindMask != kindPtr {
+	if etyp.Kind_&kindMask != kindPtr {
 		throw("runtime.SetFinalizer: first argument is " + etyp.string() + ", not pointer")
 	}
 	ot := (*ptrtype)(unsafe.Pointer(etyp))
@@ -415,7 +415,7 @@ func SetFinalizer(obj any, finalizer any) {
 	if uintptr(e.data) != base {
 		// As an implementation detail we allow to set finalizers for an inner byte
 		// of an object if it could come from tiny alloc (see mallocgc for details).
-		if ot.elem == nil || ot.elem.ptrdata != 0 || ot.elem.size >= maxTinySize {
+		if ot.elem == nil || ot.elem.PtrBytes != 0 || ot.elem.Size_ >= maxTinySize {
 			throw("runtime.SetFinalizer: pointer not at beginning of allocated block")
 		}
 	}
@@ -430,7 +430,7 @@ func SetFinalizer(obj any, finalizer any) {
 		return
 	}
 
-	if ftyp.kind&kindMask != kindFunc {
+	if ftyp.Kind_&kindMask != kindFunc {
 		throw("runtime.SetFinalizer: second argument is " + ftyp.string() + ", not a function")
 	}
 	ft := (*functype)(unsafe.Pointer(ftyp))
@@ -445,13 +445,13 @@ func SetFinalizer(obj any, finalizer any) {
 	case fint == etyp:
 		// ok - same type
 		goto okarg
-	case fint.kind&kindMask == kindPtr:
+	case fint.Kind_&kindMask == kindPtr:
 		if (fint.uncommon() == nil || etyp.uncommon() == nil) && (*ptrtype)(unsafe.Pointer(fint)).elem == ot.elem {
 			// ok - not same type, but both pointers,
 			// one or the other is unnamed, and same element type, so assignable.
 			goto okarg
 		}
-	case fint.kind&kindMask == kindInterface:
+	case fint.Kind_&kindMask == kindInterface:
 		ityp := (*interfacetype)(unsafe.Pointer(fint))
 		if len(ityp.mhdr) == 0 {
 			// ok - satisfies empty interface
@@ -466,7 +466,7 @@ okarg:
 	// compute size needed for return parameters
 	nret := uintptr(0)
 	for _, t := range ft.out() {
-		nret = alignUp(nret, uintptr(t.align)) + uintptr(t.size)
+		nret = alignUp(nret, uintptr(t.Align_)) + uintptr(t.Size_)
 	}
 	nret = alignUp(nret, goarch.PtrSize)
diff --git a/src/runtime/plugin.go b/src/runtime/plugin.go
index 312802de00..d2ad1ed21c 100644
--- a/src/runtime/plugin.go
+++ b/src/runtime/plugin.go
@@ -85,7 +85,7 @@ func plugin_lastmoduleinit() (path string, syms map[string]any, initTasks []*ini
 		(*valp)[0] = unsafe.Pointer(t)
 
 		name := symName.name()
-		if t.kind&kindMask == kindFunc {
+		if t.Kind_&kindMask == kindFunc {
 			name = "." + name
 		}
 		syms[name] = val
diff --git a/src/runtime/race.go b/src/runtime/race.go
index 9120db28da..c03866fd94 100644
--- a/src/runtime/race.go
+++ b/src/runtime/race.go
@@ -93,11 +93,11 @@ const raceenabled = true
 // callerpc is a return PC of the function that calls this function,
 // pc is start PC of the function that calls this function.
 func raceReadObjectPC(t *_type, addr unsafe.Pointer, callerpc, pc uintptr) {
-	kind := t.kind & kindMask
+	kind := t.Kind_ & kindMask
 	if kind == kindArray || kind == kindStruct {
 		// for composite objects we have to read every address
 		// because a write might happen to any subobject.
-		racereadrangepc(addr, t.size, callerpc, pc)
+		racereadrangepc(addr, t.Size_, callerpc, pc)
 	} else {
 		// for non-composite objects we can read just the start
 		// address, as any write must write the first byte.
@@ -106,11 +106,11 @@ func raceReadObjectPC(t *_type, addr unsafe.Pointer, callerpc, pc uintptr) {
 }
 
 func raceWriteObjectPC(t *_type, addr unsafe.Pointer, callerpc, pc uintptr) {
-	kind := t.kind & kindMask
+	kind := t.Kind_ & kindMask
 	if kind == kindArray || kind == kindStruct {
 		// for composite objects we have to write every address
 		// because a write might happen to any subobject.
-		racewriterangepc(addr, t.size, callerpc, pc)
+		racewriterangepc(addr, t.Size_, callerpc, pc)
 	} else {
 		// for non-composite objects we can write just the start
 		// address, as any write must write the first byte.
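
raceReadObjectPC/raceWriteObjectPC above report the full t.Size_ range only for composite kinds; any write to a non-composite value necessarily touches its first byte, so one byte of instrumentation suffices. A sketch of that rule (the kind constants here are placeholders, not the runtime's values):

package main

import "fmt"

// Placeholder kind values for illustration only.
const (
	kindArray  = 17
	kindStruct = 25
)

// raceSpan returns how many bytes of a value the race detector is told
// about, per the rule in raceReadObjectPC/raceWriteObjectPC.
func raceSpan(kind uint8, size uintptr) uintptr {
	if kind == kindArray || kind == kindStruct {
		return size // a racy write may touch any sub-object
	}
	return 1 // non-composite: every write covers the first byte
}

func main() {
	fmt.Println(raceSpan(kindArray, 64), raceSpan(1, 64)) // 64 1
}
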
diff --git a/src/runtime/select.go b/src/runtime/select.go index 1072465365..339db75d4a 100644 --- a/src/runtime/select.go +++ b/src/runtime/select.go @@ -400,16 +400,16 @@ func selectgo(cas0 *scase, order0 *uint16, pc0 *uintptr, nsends, nrecvs int, blo } if msanenabled { if casi < nsends { - msanread(cas.elem, c.elemtype.size) + msanread(cas.elem, c.elemtype.Size_) } else if cas.elem != nil { - msanwrite(cas.elem, c.elemtype.size) + msanwrite(cas.elem, c.elemtype.Size_) } } if asanenabled { if casi < nsends { - asanread(cas.elem, c.elemtype.size) + asanread(cas.elem, c.elemtype.Size_) } else if cas.elem != nil { - asanwrite(cas.elem, c.elemtype.size) + asanwrite(cas.elem, c.elemtype.Size_) } } @@ -425,10 +425,10 @@ bufrecv: racenotify(c, c.recvx, nil) } if msanenabled && cas.elem != nil { - msanwrite(cas.elem, c.elemtype.size) + msanwrite(cas.elem, c.elemtype.Size_) } if asanenabled && cas.elem != nil { - asanwrite(cas.elem, c.elemtype.size) + asanwrite(cas.elem, c.elemtype.Size_) } recvOK = true qp = chanbuf(c, c.recvx) @@ -451,10 +451,10 @@ bufsend: raceReadObjectPC(c.elemtype, cas.elem, casePC(casi), chansendpc) } if msanenabled { - msanread(cas.elem, c.elemtype.size) + msanread(cas.elem, c.elemtype.Size_) } if asanenabled { - asanread(cas.elem, c.elemtype.size) + asanread(cas.elem, c.elemtype.Size_) } typedmemmove(c.elemtype, chanbuf(c, c.sendx), cas.elem) c.sendx++ @@ -492,10 +492,10 @@ send: raceReadObjectPC(c.elemtype, cas.elem, casePC(casi), chansendpc) } if msanenabled { - msanread(cas.elem, c.elemtype.size) + msanread(cas.elem, c.elemtype.Size_) } if asanenabled { - asanread(cas.elem, c.elemtype.size) + asanread(cas.elem, c.elemtype.Size_) } send(c, sg, cas.elem, func() { selunlock(scases, lockorder) }, 2) if debugSelect { diff --git a/src/runtime/slice.go b/src/runtime/slice.go index 04062f59fc..228697a708 100644 --- a/src/runtime/slice.go +++ b/src/runtime/slice.go @@ -39,21 +39,21 @@ func makeslicecopy(et *_type, tolen int, fromlen int, from unsafe.Pointer) unsaf var tomem, copymem uintptr if uintptr(tolen) > uintptr(fromlen) { var overflow bool - tomem, overflow = math.MulUintptr(et.size, uintptr(tolen)) + tomem, overflow = math.MulUintptr(et.Size_, uintptr(tolen)) if overflow || tomem > maxAlloc || tolen < 0 { panicmakeslicelen() } - copymem = et.size * uintptr(fromlen) + copymem = et.Size_ * uintptr(fromlen) } else { // fromlen is a known good length that is equal to or greater than tolen, // thereby making tolen a good slice length too, as from and to slices have the // same element width. - tomem = et.size * uintptr(tolen) + tomem = et.Size_ * uintptr(tolen) copymem = tomem } var to unsafe.Pointer - if et.ptrdata == 0 { + if et.PtrBytes == 0 { to = mallocgc(tomem, nil, false) if copymem < tomem { memclrNoHeapPointers(add(to, copymem), tomem-copymem) @@ -86,14 +86,14 @@ func makeslicecopy(et *_type, tolen int, fromlen int, from unsafe.Pointer) unsaf } func makeslice(et *_type, len, cap int) unsafe.Pointer { - mem, overflow := math.MulUintptr(et.size, uintptr(cap)) + mem, overflow := math.MulUintptr(et.Size_, uintptr(cap)) if overflow || mem > maxAlloc || len < 0 || len > cap { // NOTE: Produce a 'len out of range' error instead of a // 'cap out of range' error when someone does make([]T, bignumber). // 'cap out of range' is true too, but since the cap is only being // supplied implicitly, saying len is clearer. // See golang.org/issue/4085.
- mem, overflow := math.MulUintptr(et.size, uintptr(len)) + mem, overflow := math.MulUintptr(et.Size_, uintptr(len)) if overflow || mem > maxAlloc || len < 0 { panicmakeslicelen() } @@ -158,20 +158,20 @@ func growslice(oldPtr unsafe.Pointer, newLen, oldCap, num int, et *_type) slice oldLen := newLen - num if raceenabled { callerpc := getcallerpc() - racereadrangepc(oldPtr, uintptr(oldLen*int(et.size)), callerpc, abi.FuncPCABIInternal(growslice)) + racereadrangepc(oldPtr, uintptr(oldLen*int(et.Size_)), callerpc, abi.FuncPCABIInternal(growslice)) } if msanenabled { - msanread(oldPtr, uintptr(oldLen*int(et.size))) + msanread(oldPtr, uintptr(oldLen*int(et.Size_))) } if asanenabled { - asanread(oldPtr, uintptr(oldLen*int(et.size))) + asanread(oldPtr, uintptr(oldLen*int(et.Size_))) } if newLen < 0 { panic(errorString("growslice: len out of range")) } - if et.size == 0 { + if et.Size_ == 0 { // append should not create a slice with nil pointer but non-zero len. // We assume that append doesn't need to preserve oldPtr in this case. return slice{unsafe.Pointer(&zerobase), newLen, newLen} @@ -204,30 +204,30 @@ func growslice(oldPtr unsafe.Pointer, newLen, oldCap, num int, et *_type) slice var overflow bool var lenmem, newlenmem, capmem uintptr - // Specialize for common values of et.size. + // Specialize for common values of et.Size. // For 1 we don't need any division/multiplication. // For goarch.PtrSize, compiler will optimize division/multiplication into a shift by a constant. // For powers of 2, use a variable shift. switch { - case et.size == 1: + case et.Size_ == 1: lenmem = uintptr(oldLen) newlenmem = uintptr(newLen) capmem = roundupsize(uintptr(newcap)) overflow = uintptr(newcap) > maxAlloc newcap = int(capmem) - case et.size == goarch.PtrSize: + case et.Size_ == goarch.PtrSize: lenmem = uintptr(oldLen) * goarch.PtrSize newlenmem = uintptr(newLen) * goarch.PtrSize capmem = roundupsize(uintptr(newcap) * goarch.PtrSize) overflow = uintptr(newcap) > maxAlloc/goarch.PtrSize newcap = int(capmem / goarch.PtrSize) - case isPowerOfTwo(et.size): + case isPowerOfTwo(et.Size_): var shift uintptr if goarch.PtrSize == 8 { // Mask shift for better code generation. - shift = uintptr(sys.TrailingZeros64(uint64(et.size))) & 63 + shift = uintptr(sys.TrailingZeros64(uint64(et.Size_))) & 63 } else { - shift = uintptr(sys.TrailingZeros32(uint32(et.size))) & 31 + shift = uintptr(sys.TrailingZeros32(uint32(et.Size_))) & 31 } lenmem = uintptr(oldLen) << shift newlenmem = uintptr(newLen) << shift @@ -236,12 +236,12 @@ func growslice(oldPtr unsafe.Pointer, newLen, oldCap, num int, et *_type) slice newcap = int(capmem >> shift) capmem = uintptr(newcap) << shift default: - lenmem = uintptr(oldLen) * et.size - newlenmem = uintptr(newLen) * et.size - capmem, overflow = math.MulUintptr(et.size, uintptr(newcap)) + lenmem = uintptr(oldLen) * et.Size_ + newlenmem = uintptr(newLen) * et.Size_ + capmem, overflow = math.MulUintptr(et.Size_, uintptr(newcap)) capmem = roundupsize(capmem) - newcap = int(capmem / et.size) - capmem = uintptr(newcap) * et.size + newcap = int(capmem / et.Size_) + capmem = uintptr(newcap) * et.Size_ } // The check of overflow in addition to capmem > maxAlloc is needed @@ -262,7 +262,7 @@ func growslice(oldPtr unsafe.Pointer, newLen, oldCap, num int, et *_type) slice } var p unsafe.Pointer - if et.ptrdata == 0 { + if et.PtrBytes == 0 { p = mallocgc(capmem, nil, false) // The append() that calls growslice is going to overwrite from oldLen to newLen. // Only clear the part that will not be overwritten. 
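growslice above specializes the capacity arithmetic by element size: one byte and one pointer word avoid general multiplication entirely, other powers of two become shifts, and the default case multiplies with an overflow check. A rough sketch of the shift and overflow ideas in user code, with math/bits.Mul standing in for the unexported runtime/internal/math.MulUintptr and a 64-bit uintptr assumed:

    package main

    import (
        "fmt"
        "math/bits"
    )

    // capBytes converts a capacity in elements to a size in bytes the way
    // growslice does: a shift when size is a power of two, otherwise a
    // full multiply with an overflow check. Sketch only; assumes a
    // 64-bit uintptr and a nonzero size.
    func capBytes(newcap, size uintptr) (mem uintptr, overflow bool) {
        if size&(size-1) == 0 { // isPowerOfTwo(size)
            shift := uintptr(bits.TrailingZeros64(uint64(size))) & 63
            return newcap << shift, newcap > (^uintptr(0))>>shift
        }
        hi, lo := bits.Mul(uint(size), uint(newcap))
        return uintptr(lo), hi != 0
    }

    func main() {
        mem, _ := capBytes(100, 16) // power of two: computed as 100 << 4
        fmt.Println(mem)            // 1600
        _, overflow := capBytes(1<<61, 24)
        fmt.Println(overflow) // true: would exceed the address space
    }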
@@ -275,7 +275,7 @@ func growslice(oldPtr unsafe.Pointer, newLen, oldCap, num int, et *_type) slice if lenmem > 0 && writeBarrier.enabled { // Only shade the pointers in oldPtr since we know the destination slice p // only contains nil pointers because it has been cleared during alloc. - bulkBarrierPreWriteSrcOnly(uintptr(p), uintptr(oldPtr), lenmem-et.size+et.ptrdata) + bulkBarrierPreWriteSrcOnly(uintptr(p), uintptr(oldPtr), lenmem-et.Size_+et.PtrBytes) } } memmove(p, oldPtr, lenmem) @@ -293,9 +293,9 @@ func reflect_growslice(et *_type, old slice, num int) slice { // the memory will be overwritten by an append() that called growslice. // Since the caller of reflect_growslice is not append(), // zero out this region before returning the slice to the reflect package. - if et.ptrdata == 0 { - oldcapmem := uintptr(old.cap) * et.size - newlenmem := uintptr(new.len) * et.size + if et.PtrBytes == 0 { + oldcapmem := uintptr(old.cap) * et.Size_ + newlenmem := uintptr(new.len) * et.Size_ memclrNoHeapPointers(add(new.array, oldcapmem), newlenmem-oldcapmem) } new.len = old.len // preserve the old length diff --git a/src/runtime/stkframe.go b/src/runtime/stkframe.go index 9c8f4be453..5caacbacba 100644 --- a/src/runtime/stkframe.go +++ b/src/runtime/stkframe.go @@ -264,7 +264,7 @@ var methodValueCallFrameObjs [1]stackObjectRecord // initialized in stackobjecti func stkobjinit() { var abiRegArgsEface any = abi.RegArgs{} abiRegArgsType := efaceOf(&abiRegArgsEface)._type - if abiRegArgsType.kind&kindGCProg != 0 { + if abiRegArgsType.Kind_&kindGCProg != 0 { throw("abiRegArgsType needs GC Prog, update methodValueCallFrameObjs") } // Set methodValueCallFrameObjs[0].gcdataoff so that @@ -281,9 +281,9 @@ func stkobjinit() { throw("methodValueCallFrameObjs is not in a module") } methodValueCallFrameObjs[0] = stackObjectRecord{ - off: -int32(alignUp(abiRegArgsType.size, 8)), // It's always the highest address local. - size: int32(abiRegArgsType.size), - _ptrdata: int32(abiRegArgsType.ptrdata), - gcdataoff: uint32(uintptr(unsafe.Pointer(abiRegArgsType.gcdata)) - mod.rodata), + off: -int32(alignUp(abiRegArgsType.Size_, 8)), // It's always the highest address local. + size: int32(abiRegArgsType.Size_), + _ptrdata: int32(abiRegArgsType.PtrBytes), + gcdataoff: uint32(uintptr(unsafe.Pointer(abiRegArgsType.GCData)) - mod.rodata), } } diff --git a/src/runtime/syscall_windows.go b/src/runtime/syscall_windows.go index 947f68510c..1c112be9d4 100644 --- a/src/runtime/syscall_windows.go +++ b/src/runtime/syscall_windows.go @@ -91,7 +91,7 @@ type abiDesc struct { } func (p *abiDesc) assignArg(t *_type) { - if t.size > goarch.PtrSize { + if t.Size_ > goarch.PtrSize { // We don't support this right now. In // stdcall/cdecl, 64-bit ints and doubles are // passed as two words (little endian); and @@ -103,7 +103,7 @@ func (p *abiDesc) assignArg(t *_type) { // registers and the stack. panic("compileCallback: argument size is larger than uintptr") } - if k := t.kind & kindMask; GOARCH != "386" && (k == kindFloat32 || k == kindFloat64) { + if k := t.Kind_ & kindMask; GOARCH != "386" && (k == kindFloat32 || k == kindFloat64) { // In fastcall, floating-point arguments in // the first four positions are passed in // floating-point registers, which we don't @@ -114,9 +114,9 @@ func (p *abiDesc) assignArg(t *_type) { panic("compileCallback: float arguments not supported") } - if t.size == 0 { + if t.Size_ == 0 { // The Go ABI aligns for zero-sized types. 
- p.dstStackSize = alignUp(p.dstStackSize, uintptr(t.align)) + p.dstStackSize = alignUp(p.dstStackSize, uintptr(t.Align_)) return } @@ -134,15 +134,15 @@ func (p *abiDesc) assignArg(t *_type) { // // TODO(mknyszek): Remove this when we no longer have // caller reserved spill space. - p.dstSpill = alignUp(p.dstSpill, uintptr(t.align)) - p.dstSpill += t.size + p.dstSpill = alignUp(p.dstSpill, uintptr(t.Align_)) + p.dstSpill += t.Size_ } else { // Register assignment failed. // Undo the work and stack assign. p.parts = oldParts // The Go ABI aligns arguments. - p.dstStackSize = alignUp(p.dstStackSize, uintptr(t.align)) + p.dstStackSize = alignUp(p.dstStackSize, uintptr(t.Align_)) // Copy just the size of the argument. Note that this // could be a small by-value struct, but C and Go @@ -152,14 +152,14 @@ func (p *abiDesc) assignArg(t *_type) { kind: abiPartStack, srcStackOffset: p.srcStackSize, dstStackOffset: p.dstStackSize, - len: t.size, + len: t.Size_, } // Add this step to the adapter. if len(p.parts) == 0 || !p.parts[len(p.parts)-1].tryMerge(part) { p.parts = append(p.parts, part) } // The Go ABI packs arguments. - p.dstStackSize += t.size + p.dstStackSize += t.Size_ } // cdecl, stdcall, fastcall, and arm pad arguments to word size. @@ -174,14 +174,14 @@ func (p *abiDesc) assignArg(t *_type) { // // Returns whether the assignment succeeded. func (p *abiDesc) tryRegAssignArg(t *_type, offset uintptr) bool { - switch k := t.kind & kindMask; k { + switch k := t.Kind_ & kindMask; k { case kindBool, kindInt, kindInt8, kindInt16, kindInt32, kindUint, kindUint8, kindUint16, kindUint32, kindUintptr, kindPtr, kindUnsafePointer: // Assign a register for all these types. - return p.assignReg(t.size, offset) + return p.assignReg(t.Size_, offset) case kindInt64, kindUint64: // Only register-assign if the registers are big enough. if goarch.PtrSize == 8 { - return p.assignReg(t.size, offset) + return p.assignReg(t.Size_, offset) } case kindArray: at := (*arraytype)(unsafe.Pointer(t)) @@ -269,7 +269,7 @@ func compileCallback(fn eface, cdecl bool) (code uintptr) { cdecl = false } - if fn._type == nil || (fn._type.kind&kindMask) != kindFunc { + if fn._type == nil || (fn._type.Kind_&kindMask) != kindFunc { panic("compileCallback: expected function with one uintptr-sized result") } ft := (*functype)(unsafe.Pointer(fn._type)) @@ -287,10 +287,10 @@ func compileCallback(fn eface, cdecl bool) (code uintptr) { if len(ft.out()) != 1 { panic("compileCallback: expected function with one uintptr-sized result") } - if ft.out()[0].size != goarch.PtrSize { + if ft.out()[0].Size_ != goarch.PtrSize { panic("compileCallback: expected function with one uintptr-sized result") } - if k := ft.out()[0].kind & kindMask; k == kindFloat32 || k == kindFloat64 { + if k := ft.out()[0].Kind_ & kindMask; k == kindFloat32 || k == kindFloat64 { // In cdecl and stdcall, float results are returned in // ST(0). In fastcall, they're returned in XMM0. // Either way, it's not AX. diff --git a/src/runtime/type.go b/src/runtime/type.go index 1c6103e6ed..62dce2c377 100644 --- a/src/runtime/type.go +++ b/src/runtime/type.go @@ -11,59 +11,29 @@ import ( "unsafe" ) -// tflag is documented in reflect/type.go. 
-// -// tflag values must be kept in sync with copies in: -// -// cmd/compile/internal/reflectdata/reflect.go -// cmd/link/internal/ld/decodesym.go -// reflect/type.go -// internal/reflectlite/type.go -type tflag uint8 - -const ( - tflagUncommon tflag = 1 << 0 - tflagExtraStar tflag = 1 << 1 - tflagNamed tflag = 1 << 2 - tflagRegularMemory tflag = 1 << 3 // equal and hash can treat values of this type as a single region of t.size bytes -) +type nameOff = abi.NameOff +type typeOff = abi.TypeOff +type textOff = abi.TextOff // Needs to be in sync with ../cmd/link/internal/ld/decodesym.go:/^func.commonsize, // ../cmd/compile/internal/reflectdata/reflect.go:/^func.dcommontype and // ../reflect/type.go:/^type.rtype. // ../internal/reflectlite/type.go:/^type.rtype. -type _type struct { - size uintptr - ptrdata uintptr // size of memory prefix holding all pointers - hash uint32 - tflag tflag - align uint8 - fieldAlign uint8 - kind uint8 - // function for comparing objects of this type - // (ptr to object A, ptr to object B) -> ==? - equal func(unsafe.Pointer, unsafe.Pointer) bool - // gcdata stores the GC type data for the garbage collector. - // If the KindGCProg bit is set in kind, gcdata is a GC program. - // Otherwise it is a ptrmask bitmap. See mbitmap.go for details. - gcdata *byte - str nameOff - ptrToThis typeOff -} +type _type abi.Type func (t *_type) string() string { - s := t.nameOff(t.str).name() - if t.tflag&tflagExtraStar != 0 { + s := t.nameOff(t.Str).name() + if t.TFlag&abi.TFlagExtraStar != 0 { return s[1:] } return s } func (t *_type) uncommon() *uncommontype { - if t.tflag&tflagUncommon == 0 { + if t.TFlag&abi.TFlagUncommon == 0 { return nil } - switch t.kind & kindMask { + switch t.Kind_ & kindMask { case kindStruct: type u struct { structtype @@ -122,7 +92,7 @@ func (t *_type) uncommon() *uncommontype { } func (t *_type) name() string { - if t.tflag&tflagNamed == 0 { + if t.TFlag&abi.TFlagNamed == 0 { return "" } s := t.string() @@ -148,7 +118,7 @@ func (t *_type) pkgpath() string { if u := t.uncommon(); u != nil { return t.nameOff(u.pkgpath).name() } - switch t.kind & kindMask { + switch t.Kind_ & kindMask { case kindStruct: st := (*structtype)(unsafe.Pointer(t)) return st.pkgPath.name() @@ -303,7 +273,7 @@ func (t *_type) textOff(off textOff) unsafe.Pointer { func (t *functype) in() []*_type { // See funcType in reflect/type.go for details on data layout. uadd := uintptr(unsafe.Sizeof(functype{})) - if t.typ.tflag&tflagUncommon != 0 { + if t.typ.TFlag&abi.TFlagUncommon != 0 { uadd += unsafe.Sizeof(uncommontype{}) } return (*[1 << 20]*_type)(add(unsafe.Pointer(t), uadd))[:t.inCount] @@ -312,7 +282,7 @@ func (t *functype) in() []*_type { func (t *functype) out() []*_type { // See funcType in reflect/type.go for details on data layout. uadd := uintptr(unsafe.Sizeof(functype{})) - if t.typ.tflag&tflagUncommon != 0 { + if t.typ.TFlag&abi.TFlagUncommon != 0 { uadd += unsafe.Sizeof(uncommontype{}) } outCount := t.outCount & (1<<15 - 1) @@ -323,10 +293,6 @@ func (t *functype) dotdotdot() bool { return t.outCount&(1<<15) != 0 } -type nameOff int32 -type typeOff int32 -type textOff int32 - type method struct { name nameOff mtyp typeOff @@ -519,13 +485,13 @@ func typelinksinit() { t = prev.typemap[typeOff(tl)] } // Add to typehash if not seen before. 
- tlist := typehash[t.hash] + tlist := typehash[t.Hash] for _, tcur := range tlist { if tcur == t { continue collect } } - typehash[t.hash] = append(tlist, t) + typehash[t.Hash] = append(tlist, t) } if md.typemap == nil { @@ -537,7 +503,7 @@ func typelinksinit() { md.typemap = tm for _, tl := range md.typelinks { t := (*_type)(unsafe.Pointer(md.types + uintptr(tl))) - for _, candidate := range typehash[t.hash] { + for _, candidate := range typehash[t.Hash] { seen := map[_typePair]struct{}{} if typesEqual(t, candidate, seen) { t = candidate @@ -583,8 +549,8 @@ func typesEqual(t, v *_type, seen map[_typePair]struct{}) bool { if t == v { return true } - kind := t.kind & kindMask - if kind != v.kind&kindMask { + kind := t.Kind_ & kindMask + if kind != v.Kind_&kindMask { return false } if t.string() != v.string() { diff --git a/src/runtime/typekind.go b/src/runtime/typekind.go index 7087a9b046..bd2dec94c4 100644 --- a/src/runtime/typekind.go +++ b/src/runtime/typekind.go @@ -39,5 +39,5 @@ const ( // isDirectIface reports whether t is stored directly in an interface value. func isDirectIface(t *_type) bool { - return t.kind&kindDirectIface != 0 + return t.Kind_&kindDirectIface != 0 } diff --git a/src/runtime/unsafe.go b/src/runtime/unsafe.go index d2773bc56d..6675264f59 100644 --- a/src/runtime/unsafe.go +++ b/src/runtime/unsafe.go @@ -55,13 +55,13 @@ func unsafeslice(et *_type, ptr unsafe.Pointer, len int) { panicunsafeslicelen1(getcallerpc()) } - if et.size == 0 { + if et.Size_ == 0 { if ptr == nil && len > 0 { panicunsafeslicenilptr1(getcallerpc()) } } - mem, overflow := math.MulUintptr(et.size, uintptr(len)) + mem, overflow := math.MulUintptr(et.Size_, uintptr(len)) if overflow || mem > -uintptr(ptr) { if ptr == nil { panicunsafeslicenilptr1(getcallerpc()) @@ -84,7 +84,7 @@ func unsafeslicecheckptr(et *_type, ptr unsafe.Pointer, len64 int64) { // Check that underlying array doesn't straddle multiple heap objects. // unsafeslice64 has already checked for overflow. - if checkptrStraddles(ptr, uintptr(len64)*et.size) { + if checkptrStraddles(ptr, uintptr(len64)*et.Size_) { throw("checkptr: unsafe.Slice result straddles multiple allocations") } }
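The overflow test in unsafeslice above, mem > -uintptr(ptr), works because negating a nonzero pointer value in uintptr arithmetic yields exactly the number of bytes from ptr to the top of the address space; anything larger would wrap around. A sketch of the arithmetic using made-up addresses rather than real pointers:

    package main

    import "fmt"

    // wouldWrap mirrors the unsafeslice guard: in two's-complement
    // uintptr arithmetic, -ptr is the number of addressable bytes from
    // ptr to the top of the address space, so mem > -ptr means ptr+mem
    // would wrap around. Addresses here are invented, not real pointers.
    func wouldWrap(ptr, mem uintptr) bool {
        return mem > -ptr
    }

    func main() {
        ptr := ^uintptr(0) - 4096         // hypothetical address 4 KiB below the top
        fmt.Println(wouldWrap(ptr, 1024)) // false: 1 KiB still fits
        fmt.Println(wouldWrap(ptr, 8192)) // true: unsafe.Slice would panic
    }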
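Several call sites in this change (assignArg, stkobjinit, the nret loop in SetFinalizer) round sizes and offsets up with alignUp, which is unexported. A reimplementation for illustration, valid whenever the alignment is a power of two:

    package main

    import "fmt"

    // alignUp rounds n up to a multiple of a, which must be a power of
    // two: add a-1, then clear the low bits. Reimplemented here for
    // illustration; the runtime's helper is not importable.
    func alignUp(n, a uintptr) uintptr {
        return (n + a - 1) &^ (a - 1)
    }

    func main() {
        fmt.Println(alignUp(13, 8)) // 16: next 8-byte boundary
        fmt.Println(alignUp(16, 8)) // 16: already aligned
    }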
