aboutsummaryrefslogtreecommitdiff
path: root/src/runtime/malloc_generated.go
diff options
context:
space:
mode:
authorCherry Mui <cherryyz@google.com>2025-09-25 13:33:58 -0400
committerCherry Mui <cherryyz@google.com>2025-09-25 13:33:59 -0400
commita693ae1e9aebac896f6634583dbdd1cd319f3983 (patch)
tree44ef04e84afe5ef8652222c5500ab6c779d09650 /src/runtime/malloc_generated.go
parent5a78e1a4a1c79185e86b5c18efffba2a9b9d3739 (diff)
parentd70ad4e740e24b4b76961c4b56d698fa23668aa2 (diff)
downloadgo-a693ae1e9aebac896f6634583dbdd1cd319f3983.tar.xz
[dev.simd] all: merge master (d70ad4e) into dev.simd
Conflicts: - src/cmd/compile/internal/types2/stdlib_test.go - src/go/types/stdlib_test.go Merge List: + 2025-09-25 d70ad4e740 sync/atomic: correct Uintptr.Or return doc + 2025-09-25 d7abfe4f0d runtime: acquire/release C TSAN lock when calling cgo symbolizer/tracebacker + 2025-09-25 393d91aea0 cmd/fix: remove all functionality + 2025-09-25 6dceff8bad cmd/link: handle -w flag in external linking mode + 2025-09-25 76d088eb74 cmd/internal/obj/riscv: remove ACFLWSP/ACFSWSP and ACFLW/ACFSW + 2025-09-25 5225e9dc49 doc/next: document new image/jpeg DCT in release notes + 2025-09-25 81a83bba21 cmd: update x/tools@4df13e3 + 2025-09-25 6b32c613ca go/types: make typeset return an iterator + 2025-09-25 fbba930271 image/jpeg: replace fdct.go and idct.go with new implementation in dct.go + 2025-09-25 92e093467f image/jpeg: correct and test reference slowFDCT and slowIDCT + 2025-09-25 27c7bbc51c image/jpeg: prepare for new FDCT/IDCT implementations + 2025-09-24 f15cd63ec4 cmd/compile: don't rely on loop info when there are irreducible loops + 2025-09-24 371c1d2fcb cmd/internal/obj/riscv: add support for vector unit-stride fault-only-first load instructions + 2025-09-23 411c250d64 runtime: add specialized malloc functions for sizes up to 512 bytes + 2025-09-23 d7a38adf4c runtime: eliminate global span queue [green tea] + 2025-09-23 7bc1935db5 cmd/compile/internal: support new(expr) + 2025-09-23 eb78f13c9f doc/go_spec.html: document new(expr) + 2025-09-23 74cc463f9e go/token: add TestRemovedFileFileReturnsNil test + 2025-09-23 902dc27ae9 go/token: clear cache after grabbing the mutex in RemoveFile + 2025-09-23 a13d085a5b cmd/cgo: don't hardcode section name in TestNumberOfExportedFunctions + 2025-09-23 61bf26a9ee cmd/link: fix Macho-O X86_64_RELOC_SUBTRACTOR in internal linking + 2025-09-23 4b787c8c2b reflect: remove stale comment in unpackEface + 2025-09-23 3df27cd21a cmd/compile: fix typo in comment + 2025-09-23 684e8d3363 reflect: allocate memory in TypeAssert[I] only when the assertion succeeds
+ 2025-09-23 a5866ebe40 cmd/compile: prevent shapifying of pointer shape type + 2025-09-23 a27261c42f go/types,types2: allow new(expr) + 2025-09-23 e93f439ac4 runtime/cgo: retry when CreateThread fails with ERROR_ACCESS_DENIED + 2025-09-23 69e74b0aac runtime: deduplicate pMask resize code + 2025-09-23 fde10c4ce7 runtime: split gcMarkWorkAvailable into two separate conditions + 2025-09-23 5d040df092 runtime: use scan kernels in scanSpan [green tea] + 2025-09-23 7e0251bf58 runtime: don't report non-blocked goroutines as "(durable)" in stacks + 2025-09-23 22ac328856 cmd/link: make -w behavior consistent on Windows Change-Id: Id76b5a30a3b6f6669437f97e3320c9bca65a1e96
Diffstat (limited to 'src/runtime/malloc_generated.go')
-rw-r--r--src/runtime/malloc_generated.go8468
1 files changed, 8468 insertions, 0 deletions
diff --git a/src/runtime/malloc_generated.go b/src/runtime/malloc_generated.go
new file mode 100644
index 0000000000..600048c675
--- /dev/null
+++ b/src/runtime/malloc_generated.go
@@ -0,0 +1,8468 @@
+// Code generated by mkmalloc.go; DO NOT EDIT.
+
+package runtime
+
+import (
+ "internal/goarch"
+ "internal/runtime/sys"
+ "unsafe"
+)
+
+// mallocgcSmallScanNoHeaderSC1 allocates one 8-byte object (size class 1)
+// that contains pointers ("Scan") and whose span keeps heap bits inline
+// rather than a malloc header ("NoHeader"). It is a size-class-specialized
+// copy of the generic small-object mallocgc path. Note: the needzero
+// parameter is not consulted here; zeroing is driven by span.needzero.
+func mallocgcSmallScanNoHeaderSC1(size uintptr, typ *_type, needzero bool) unsafe.Pointer {
+ // Allocation is forbidden during mark termination.
+ if doubleCheckMalloc {
+ if gcphase == _GCmarktermination {
+ throw("mallocgc called with gcphase == _GCmarktermination")
+ }
+ }
+
+ // Lock-rank bookkeeping: this allocation may queue a finalizer.
+ lockRankMayQueueFinalizer()
+
+ // Debug hooks may satisfy the allocation themselves.
+ if debug.malloc {
+ if x := preMallocgcDebug(size, typ); x != nil {
+ return x
+ }
+ }
+
+ // While the GC is marking, charge this allocation against the
+ // goroutine's assist credit so allocation helps pay for scanning.
+ if gcBlackenEnabled != 0 {
+ deductAssistCredit(size)
+ }
+
+ const sizeclass = 1
+
+ const elemsize = 8
+
+ // Pin to the current M (disables preemption) so the per-P mcache can
+ // be used without synchronization; mallocing guards against reentrancy.
+ mp := acquirem()
+ if doubleCheckMalloc {
+ doubleCheckSmallScanNoHeader(size, typ, mp)
+ }
+ mp.mallocing = 1
+
+ checkGCTrigger := false
+ c := getMCache(mp)
+ // Span class: size class shifted into place, low (noscan) bit clear,
+ // i.e. a scannable span.
+ const spc = spanClass(sizeclass<<1) | spanClass(0)
+ span := c.alloc[spc]
+
+ // Inlined nextFreeFast: pop the next free slot from the span's 64-bit
+ // allocCache bitmap when one is available without a cache refill.
+ var nextFreeFastResult gclinkptr
+ if span.allocCache != 0 {
+ theBit := sys.TrailingZeros64(span.allocCache)
+ result := span.freeindex + uint16(theBit)
+ if result < span.nelems {
+ freeidx := result + 1
+ // At a 64-object boundary the allocCache needs a refill;
+ // leave that to the slow path below.
+ if !(freeidx%64 == 0 && freeidx != span.nelems) {
+ span.allocCache >>= uint(theBit + 1)
+ span.freeindex = freeidx
+ span.allocCount++
+ nextFreeFastResult = gclinkptr(uintptr(result)*
+ 8 +
+ span.base())
+ }
+ }
+ }
+ v := nextFreeFastResult
+ if v == 0 {
+ // Slow path: refill the allocCache or obtain a fresh span; may
+ // request a GC-trigger check (performed after releasing the M).
+ v, span, checkGCTrigger = c.nextFree(spc)
+ }
+ x := unsafe.Pointer(v)
+ if span.needzero != 0 {
+ memclrNoHeapPointers(x, elemsize)
+ }
+ if goarch.PtrSize == 8 && sizeclass == 1 {
+
+ // Live branch for this size class: on 64-bit an 8-byte scan
+ // object is exactly one pointer word, so no heap bits need to
+ // be written; just account for the scannable bytes.
+ c.scanAlloc += 8
+ } else {
+ // Inlined writeHeapBitsSmall: record typ's pointer/scalar mask
+ // in the span's packed heap bitmap for this object.
+ dataSize := size
+ x := uintptr(x)
+
+ if doubleCheckHeapSetType && (!heapBitsInSpan(dataSize) || !heapBitsInSpan(8)) {
+ throw("tried to write heap bits, but no heap bits in span")
+ }
+
+ src0 := readUintptr(getGCMask(typ))
+
+ const elemsize = 8
+
+ scanSize := typ.PtrBytes
+ src := src0
+ if typ.Size_ == goarch.PtrSize {
+ // Single-word element type: every word is a pointer.
+ src = (1 << (dataSize / goarch.PtrSize)) - 1
+ } else {
+
+ if doubleCheckHeapSetType && !asanenabled && dataSize%typ.Size_ != 0 {
+ throw("runtime: (*mspan).writeHeapBitsSmall: dataSize is not a multiple of typ.Size_")
+ }
+ // Replicate the element mask across every element of an
+ // array allocation.
+ for i := typ.Size_; i < dataSize; i += typ.Size_ {
+ src |= src0 << (i / goarch.PtrSize)
+ scanSize += typ.Size_
+ }
+ }
+
+ // Locate the object's bit range in the span's heap bitmap:
+ // bitmap word i, bit offset j, spanning "bits" bits total.
+ dstBase, _ := spanHeapBitsRange(span.base(), pageSize, elemsize)
+ dst := unsafe.Pointer(dstBase)
+ o := (x - span.base()) / goarch.PtrSize
+ i := o / ptrBits
+ j := o % ptrBits
+ const bits uintptr = elemsize / goarch.PtrSize
+
+ const bitsIsPowerOfTwo = bits&(bits-1) == 0
+ if bits > ptrBits || (!bitsIsPowerOfTwo && j+bits > ptrBits) {
+
+ // The mask straddles a word boundary: split it across two
+ // adjacent bitmap words.
+ bits0 := ptrBits - j
+ bits1 := bits - bits0
+ dst0 := (*uintptr)(add(dst, (i+0)*goarch.PtrSize))
+ dst1 := (*uintptr)(add(dst, (i+1)*goarch.PtrSize))
+ *dst0 = (*dst0)&(^uintptr(0)>>bits0) | (src << j)
+ *dst1 = (*dst1)&^((1<<bits1)-1) | (src >> bits0)
+ } else {
+
+ // The mask fits in a single bitmap word.
+ dst := (*uintptr)(add(dst, i*goarch.PtrSize))
+ *dst = (*dst)&^(((1<<(min(bits, ptrBits)))-1)<<j) | (src << j)
+ }
+
+ const doubleCheck = false
+ if doubleCheck {
+ writeHeapBitsDoubleCheck(span, x, dataSize, src, src0, i, j, bits, typ)
+ }
+ if doubleCheckHeapSetType {
+ doubleCheckHeapType(x, dataSize, typ, nil, span)
+ }
+ c.scanAlloc += scanSize
+ }
+
+ // Make the zeroed memory and heap bits visible before the pointer to
+ // the object is published.
+ publicationBarrier()
+
+ if writeBarrier.enabled {
+
+ // GC is running: mark the new object so it is allocated black.
+ gcmarknewobject(span, uintptr(x))
+ } else {
+
+ // Keep freeIndexForScan in sync with freeindex for the GC.
+ span.freeIndexForScan = span.freeindex
+ }
+
+ // Heap-profiling sample accounting.
+ c.nextSample -= int64(elemsize)
+ if c.nextSample < 0 || MemProfileRate != c.memProfRate {
+ profilealloc(mp, x, elemsize)
+ }
+ mp.mallocing = 0
+ releasem(mp)
+
+ // With the M released it is now safe to start a GC cycle if the slow
+ // path asked for a trigger check.
+ if checkGCTrigger {
+ if t := (gcTrigger{kind: gcTriggerHeap}); t.test() {
+ gcStart(t)
+ }
+ }
+ // Charge assist credit for the rounding from size up to elemsize.
+ if gcBlackenEnabled != 0 && elemsize != 0 {
+ if assistG := getg().m.curg; assistG != nil {
+ assistG.gcAssistBytes -= int64(elemsize - size)
+ }
+ }
+
+ if debug.malloc {
+ postMallocgcDebug(x, elemsize, typ)
+ }
+ return x
+}
+
+// mallocgcSmallScanNoHeaderSC2 allocates one 16-byte object (size class 2)
+// that contains pointers ("Scan") and whose span keeps heap bits inline
+// rather than a malloc header ("NoHeader"). It is a size-class-specialized
+// copy of the generic small-object mallocgc path. Note: the needzero
+// parameter is not consulted here; zeroing is driven by span.needzero.
+func mallocgcSmallScanNoHeaderSC2(size uintptr, typ *_type, needzero bool) unsafe.Pointer {
+ // Allocation is forbidden during mark termination.
+ if doubleCheckMalloc {
+ if gcphase == _GCmarktermination {
+ throw("mallocgc called with gcphase == _GCmarktermination")
+ }
+ }
+
+ // Lock-rank bookkeeping: this allocation may queue a finalizer.
+ lockRankMayQueueFinalizer()
+
+ // Debug hooks may satisfy the allocation themselves.
+ if debug.malloc {
+ if x := preMallocgcDebug(size, typ); x != nil {
+ return x
+ }
+ }
+
+ // While the GC is marking, charge this allocation against the
+ // goroutine's assist credit so allocation helps pay for scanning.
+ if gcBlackenEnabled != 0 {
+ deductAssistCredit(size)
+ }
+
+ const sizeclass = 2
+
+ const elemsize = 16
+
+ // Pin to the current M (disables preemption) so the per-P mcache can
+ // be used without synchronization; mallocing guards against reentrancy.
+ mp := acquirem()
+ if doubleCheckMalloc {
+ doubleCheckSmallScanNoHeader(size, typ, mp)
+ }
+ mp.mallocing = 1
+
+ checkGCTrigger := false
+ c := getMCache(mp)
+ // Span class: size class shifted into place, low (noscan) bit clear,
+ // i.e. a scannable span.
+ const spc = spanClass(sizeclass<<1) | spanClass(0)
+ span := c.alloc[spc]
+
+ // Inlined nextFreeFast: pop the next free slot from the span's 64-bit
+ // allocCache bitmap when one is available without a cache refill.
+ var nextFreeFastResult gclinkptr
+ if span.allocCache != 0 {
+ theBit := sys.TrailingZeros64(span.allocCache)
+ result := span.freeindex + uint16(theBit)
+ if result < span.nelems {
+ freeidx := result + 1
+ // At a 64-object boundary the allocCache needs a refill;
+ // leave that to the slow path below.
+ if !(freeidx%64 == 0 && freeidx != span.nelems) {
+ span.allocCache >>= uint(theBit + 1)
+ span.freeindex = freeidx
+ span.allocCount++
+ nextFreeFastResult = gclinkptr(uintptr(result)*
+ 16 +
+ span.base())
+ }
+ }
+ }
+ v := nextFreeFastResult
+ if v == 0 {
+ // Slow path: refill the allocCache or obtain a fresh span; may
+ // request a GC-trigger check (performed after releasing the M).
+ v, span, checkGCTrigger = c.nextFree(spc)
+ }
+ x := unsafe.Pointer(v)
+ if span.needzero != 0 {
+ memclrNoHeapPointers(x, elemsize)
+ }
+ if goarch.PtrSize == 8 && sizeclass == 1 {
+
+ // Dead branch for this size class (sizeclass != 1): the constant
+ // condition lets the compiler discard it.
+ c.scanAlloc += 8
+ } else {
+ // Inlined writeHeapBitsSmall: record typ's pointer/scalar mask
+ // in the span's packed heap bitmap for this object.
+ dataSize := size
+ x := uintptr(x)
+
+ if doubleCheckHeapSetType && (!heapBitsInSpan(dataSize) || !heapBitsInSpan(16)) {
+ throw("tried to write heap bits, but no heap bits in span")
+ }
+
+ src0 := readUintptr(getGCMask(typ))
+
+ const elemsize = 16
+
+ scanSize := typ.PtrBytes
+ src := src0
+ if typ.Size_ == goarch.PtrSize {
+ // Single-word element type: every word is a pointer.
+ src = (1 << (dataSize / goarch.PtrSize)) - 1
+ } else {
+
+ if doubleCheckHeapSetType && !asanenabled && dataSize%typ.Size_ != 0 {
+ throw("runtime: (*mspan).writeHeapBitsSmall: dataSize is not a multiple of typ.Size_")
+ }
+ // Replicate the element mask across every element of an
+ // array allocation.
+ for i := typ.Size_; i < dataSize; i += typ.Size_ {
+ src |= src0 << (i / goarch.PtrSize)
+ scanSize += typ.Size_
+ }
+ }
+
+ // Locate the object's bit range in the span's heap bitmap:
+ // bitmap word i, bit offset j, spanning "bits" bits total.
+ dstBase, _ := spanHeapBitsRange(span.base(), pageSize, elemsize)
+ dst := unsafe.Pointer(dstBase)
+ o := (x - span.base()) / goarch.PtrSize
+ i := o / ptrBits
+ j := o % ptrBits
+ const bits uintptr = elemsize / goarch.PtrSize
+
+ const bitsIsPowerOfTwo = bits&(bits-1) == 0
+ if bits > ptrBits || (!bitsIsPowerOfTwo && j+bits > ptrBits) {
+
+ // The mask straddles a word boundary: split it across two
+ // adjacent bitmap words.
+ bits0 := ptrBits - j
+ bits1 := bits - bits0
+ dst0 := (*uintptr)(add(dst, (i+0)*goarch.PtrSize))
+ dst1 := (*uintptr)(add(dst, (i+1)*goarch.PtrSize))
+ *dst0 = (*dst0)&(^uintptr(0)>>bits0) | (src << j)
+ *dst1 = (*dst1)&^((1<<bits1)-1) | (src >> bits0)
+ } else {
+
+ // The mask fits in a single bitmap word.
+ dst := (*uintptr)(add(dst, i*goarch.PtrSize))
+ *dst = (*dst)&^(((1<<(min(bits, ptrBits)))-1)<<j) | (src << j)
+ }
+
+ const doubleCheck = false
+ if doubleCheck {
+ writeHeapBitsDoubleCheck(span, x, dataSize, src, src0, i, j, bits, typ)
+ }
+ if doubleCheckHeapSetType {
+ doubleCheckHeapType(x, dataSize, typ, nil, span)
+ }
+ c.scanAlloc += scanSize
+ }
+
+ // Make the zeroed memory and heap bits visible before the pointer to
+ // the object is published.
+ publicationBarrier()
+
+ if writeBarrier.enabled {
+
+ // GC is running: mark the new object so it is allocated black.
+ gcmarknewobject(span, uintptr(x))
+ } else {
+
+ // Keep freeIndexForScan in sync with freeindex for the GC.
+ span.freeIndexForScan = span.freeindex
+ }
+
+ // Heap-profiling sample accounting.
+ c.nextSample -= int64(elemsize)
+ if c.nextSample < 0 || MemProfileRate != c.memProfRate {
+ profilealloc(mp, x, elemsize)
+ }
+ mp.mallocing = 0
+ releasem(mp)
+
+ // With the M released it is now safe to start a GC cycle if the slow
+ // path asked for a trigger check.
+ if checkGCTrigger {
+ if t := (gcTrigger{kind: gcTriggerHeap}); t.test() {
+ gcStart(t)
+ }
+ }
+ // Charge assist credit for the rounding from size up to elemsize.
+ if gcBlackenEnabled != 0 && elemsize != 0 {
+ if assistG := getg().m.curg; assistG != nil {
+ assistG.gcAssistBytes -= int64(elemsize - size)
+ }
+ }
+
+ if debug.malloc {
+ postMallocgcDebug(x, elemsize, typ)
+ }
+ return x
+}
+
+// mallocgcSmallScanNoHeaderSC3 allocates one 24-byte object (size class 3)
+// that contains pointers ("Scan") and whose span keeps heap bits inline
+// rather than a malloc header ("NoHeader"). It is a size-class-specialized
+// copy of the generic small-object mallocgc path. Note: the needzero
+// parameter is not consulted here; zeroing is driven by span.needzero.
+func mallocgcSmallScanNoHeaderSC3(size uintptr, typ *_type, needzero bool) unsafe.Pointer {
+ // Allocation is forbidden during mark termination.
+ if doubleCheckMalloc {
+ if gcphase == _GCmarktermination {
+ throw("mallocgc called with gcphase == _GCmarktermination")
+ }
+ }
+
+ // Lock-rank bookkeeping: this allocation may queue a finalizer.
+ lockRankMayQueueFinalizer()
+
+ // Debug hooks may satisfy the allocation themselves.
+ if debug.malloc {
+ if x := preMallocgcDebug(size, typ); x != nil {
+ return x
+ }
+ }
+
+ // While the GC is marking, charge this allocation against the
+ // goroutine's assist credit so allocation helps pay for scanning.
+ if gcBlackenEnabled != 0 {
+ deductAssistCredit(size)
+ }
+
+ const sizeclass = 3
+
+ const elemsize = 24
+
+ // Pin to the current M (disables preemption) so the per-P mcache can
+ // be used without synchronization; mallocing guards against reentrancy.
+ mp := acquirem()
+ if doubleCheckMalloc {
+ doubleCheckSmallScanNoHeader(size, typ, mp)
+ }
+ mp.mallocing = 1
+
+ checkGCTrigger := false
+ c := getMCache(mp)
+ // Span class: size class shifted into place, low (noscan) bit clear,
+ // i.e. a scannable span.
+ const spc = spanClass(sizeclass<<1) | spanClass(0)
+ span := c.alloc[spc]
+
+ // Inlined nextFreeFast: pop the next free slot from the span's 64-bit
+ // allocCache bitmap when one is available without a cache refill.
+ var nextFreeFastResult gclinkptr
+ if span.allocCache != 0 {
+ theBit := sys.TrailingZeros64(span.allocCache)
+ result := span.freeindex + uint16(theBit)
+ if result < span.nelems {
+ freeidx := result + 1
+ // At a 64-object boundary the allocCache needs a refill;
+ // leave that to the slow path below.
+ if !(freeidx%64 == 0 && freeidx != span.nelems) {
+ span.allocCache >>= uint(theBit + 1)
+ span.freeindex = freeidx
+ span.allocCount++
+ nextFreeFastResult = gclinkptr(uintptr(result)*
+ 24 +
+ span.base())
+ }
+ }
+ }
+ v := nextFreeFastResult
+ if v == 0 {
+ // Slow path: refill the allocCache or obtain a fresh span; may
+ // request a GC-trigger check (performed after releasing the M).
+ v, span, checkGCTrigger = c.nextFree(spc)
+ }
+ x := unsafe.Pointer(v)
+ if span.needzero != 0 {
+ memclrNoHeapPointers(x, elemsize)
+ }
+ if goarch.PtrSize == 8 && sizeclass == 1 {
+
+ // Dead branch for this size class (sizeclass != 1): the constant
+ // condition lets the compiler discard it.
+ c.scanAlloc += 8
+ } else {
+ // Inlined writeHeapBitsSmall: record typ's pointer/scalar mask
+ // in the span's packed heap bitmap for this object.
+ dataSize := size
+ x := uintptr(x)
+
+ if doubleCheckHeapSetType && (!heapBitsInSpan(dataSize) || !heapBitsInSpan(24)) {
+ throw("tried to write heap bits, but no heap bits in span")
+ }
+
+ src0 := readUintptr(getGCMask(typ))
+
+ const elemsize = 24
+
+ scanSize := typ.PtrBytes
+ src := src0
+ if typ.Size_ == goarch.PtrSize {
+ // Single-word element type: every word is a pointer.
+ src = (1 << (dataSize / goarch.PtrSize)) - 1
+ } else {
+
+ if doubleCheckHeapSetType && !asanenabled && dataSize%typ.Size_ != 0 {
+ throw("runtime: (*mspan).writeHeapBitsSmall: dataSize is not a multiple of typ.Size_")
+ }
+ // Replicate the element mask across every element of an
+ // array allocation.
+ for i := typ.Size_; i < dataSize; i += typ.Size_ {
+ src |= src0 << (i / goarch.PtrSize)
+ scanSize += typ.Size_
+ }
+ }
+
+ // Locate the object's bit range in the span's heap bitmap:
+ // bitmap word i, bit offset j, spanning "bits" bits total.
+ dstBase, _ := spanHeapBitsRange(span.base(), pageSize, elemsize)
+ dst := unsafe.Pointer(dstBase)
+ o := (x - span.base()) / goarch.PtrSize
+ i := o / ptrBits
+ j := o % ptrBits
+ const bits uintptr = elemsize / goarch.PtrSize
+
+ const bitsIsPowerOfTwo = bits&(bits-1) == 0
+ if bits > ptrBits || (!bitsIsPowerOfTwo && j+bits > ptrBits) {
+
+ // The mask straddles a word boundary: split it across two
+ // adjacent bitmap words.
+ bits0 := ptrBits - j
+ bits1 := bits - bits0
+ dst0 := (*uintptr)(add(dst, (i+0)*goarch.PtrSize))
+ dst1 := (*uintptr)(add(dst, (i+1)*goarch.PtrSize))
+ *dst0 = (*dst0)&(^uintptr(0)>>bits0) | (src << j)
+ *dst1 = (*dst1)&^((1<<bits1)-1) | (src >> bits0)
+ } else {
+
+ // The mask fits in a single bitmap word.
+ dst := (*uintptr)(add(dst, i*goarch.PtrSize))
+ *dst = (*dst)&^(((1<<(min(bits, ptrBits)))-1)<<j) | (src << j)
+ }
+
+ const doubleCheck = false
+ if doubleCheck {
+ writeHeapBitsDoubleCheck(span, x, dataSize, src, src0, i, j, bits, typ)
+ }
+ if doubleCheckHeapSetType {
+ doubleCheckHeapType(x, dataSize, typ, nil, span)
+ }
+ c.scanAlloc += scanSize
+ }
+
+ // Make the zeroed memory and heap bits visible before the pointer to
+ // the object is published.
+ publicationBarrier()
+
+ if writeBarrier.enabled {
+
+ // GC is running: mark the new object so it is allocated black.
+ gcmarknewobject(span, uintptr(x))
+ } else {
+
+ // Keep freeIndexForScan in sync with freeindex for the GC.
+ span.freeIndexForScan = span.freeindex
+ }
+
+ // Heap-profiling sample accounting.
+ c.nextSample -= int64(elemsize)
+ if c.nextSample < 0 || MemProfileRate != c.memProfRate {
+ profilealloc(mp, x, elemsize)
+ }
+ mp.mallocing = 0
+ releasem(mp)
+
+ // With the M released it is now safe to start a GC cycle if the slow
+ // path asked for a trigger check.
+ if checkGCTrigger {
+ if t := (gcTrigger{kind: gcTriggerHeap}); t.test() {
+ gcStart(t)
+ }
+ }
+ // Charge assist credit for the rounding from size up to elemsize.
+ if gcBlackenEnabled != 0 && elemsize != 0 {
+ if assistG := getg().m.curg; assistG != nil {
+ assistG.gcAssistBytes -= int64(elemsize - size)
+ }
+ }
+
+ if debug.malloc {
+ postMallocgcDebug(x, elemsize, typ)
+ }
+ return x
+}
+
+// mallocgcSmallScanNoHeaderSC4 allocates one 32-byte object (size class 4)
+// that contains pointers ("Scan") and whose span keeps heap bits inline
+// rather than a malloc header ("NoHeader"). It is a size-class-specialized
+// copy of the generic small-object mallocgc path. Note: the needzero
+// parameter is not consulted here; zeroing is driven by span.needzero.
+func mallocgcSmallScanNoHeaderSC4(size uintptr, typ *_type, needzero bool) unsafe.Pointer {
+ // Allocation is forbidden during mark termination.
+ if doubleCheckMalloc {
+ if gcphase == _GCmarktermination {
+ throw("mallocgc called with gcphase == _GCmarktermination")
+ }
+ }
+
+ // Lock-rank bookkeeping: this allocation may queue a finalizer.
+ lockRankMayQueueFinalizer()
+
+ // Debug hooks may satisfy the allocation themselves.
+ if debug.malloc {
+ if x := preMallocgcDebug(size, typ); x != nil {
+ return x
+ }
+ }
+
+ // While the GC is marking, charge this allocation against the
+ // goroutine's assist credit so allocation helps pay for scanning.
+ if gcBlackenEnabled != 0 {
+ deductAssistCredit(size)
+ }
+
+ const sizeclass = 4
+
+ const elemsize = 32
+
+ // Pin to the current M (disables preemption) so the per-P mcache can
+ // be used without synchronization; mallocing guards against reentrancy.
+ mp := acquirem()
+ if doubleCheckMalloc {
+ doubleCheckSmallScanNoHeader(size, typ, mp)
+ }
+ mp.mallocing = 1
+
+ checkGCTrigger := false
+ c := getMCache(mp)
+ // Span class: size class shifted into place, low (noscan) bit clear,
+ // i.e. a scannable span.
+ const spc = spanClass(sizeclass<<1) | spanClass(0)
+ span := c.alloc[spc]
+
+ // Inlined nextFreeFast: pop the next free slot from the span's 64-bit
+ // allocCache bitmap when one is available without a cache refill.
+ var nextFreeFastResult gclinkptr
+ if span.allocCache != 0 {
+ theBit := sys.TrailingZeros64(span.allocCache)
+ result := span.freeindex + uint16(theBit)
+ if result < span.nelems {
+ freeidx := result + 1
+ // At a 64-object boundary the allocCache needs a refill;
+ // leave that to the slow path below.
+ if !(freeidx%64 == 0 && freeidx != span.nelems) {
+ span.allocCache >>= uint(theBit + 1)
+ span.freeindex = freeidx
+ span.allocCount++
+ nextFreeFastResult = gclinkptr(uintptr(result)*
+ 32 +
+ span.base())
+ }
+ }
+ }
+ v := nextFreeFastResult
+ if v == 0 {
+ // Slow path: refill the allocCache or obtain a fresh span; may
+ // request a GC-trigger check (performed after releasing the M).
+ v, span, checkGCTrigger = c.nextFree(spc)
+ }
+ x := unsafe.Pointer(v)
+ if span.needzero != 0 {
+ memclrNoHeapPointers(x, elemsize)
+ }
+ if goarch.PtrSize == 8 && sizeclass == 1 {
+
+ // Dead branch for this size class (sizeclass != 1): the constant
+ // condition lets the compiler discard it.
+ c.scanAlloc += 8
+ } else {
+ // Inlined writeHeapBitsSmall: record typ's pointer/scalar mask
+ // in the span's packed heap bitmap for this object.
+ dataSize := size
+ x := uintptr(x)
+
+ if doubleCheckHeapSetType && (!heapBitsInSpan(dataSize) || !heapBitsInSpan(32)) {
+ throw("tried to write heap bits, but no heap bits in span")
+ }
+
+ src0 := readUintptr(getGCMask(typ))
+
+ const elemsize = 32
+
+ scanSize := typ.PtrBytes
+ src := src0
+ if typ.Size_ == goarch.PtrSize {
+ // Single-word element type: every word is a pointer.
+ src = (1 << (dataSize / goarch.PtrSize)) - 1
+ } else {
+
+ if doubleCheckHeapSetType && !asanenabled && dataSize%typ.Size_ != 0 {
+ throw("runtime: (*mspan).writeHeapBitsSmall: dataSize is not a multiple of typ.Size_")
+ }
+ // Replicate the element mask across every element of an
+ // array allocation.
+ for i := typ.Size_; i < dataSize; i += typ.Size_ {
+ src |= src0 << (i / goarch.PtrSize)
+ scanSize += typ.Size_
+ }
+ }
+
+ // Locate the object's bit range in the span's heap bitmap:
+ // bitmap word i, bit offset j, spanning "bits" bits total.
+ dstBase, _ := spanHeapBitsRange(span.base(), pageSize, elemsize)
+ dst := unsafe.Pointer(dstBase)
+ o := (x - span.base()) / goarch.PtrSize
+ i := o / ptrBits
+ j := o % ptrBits
+ const bits uintptr = elemsize / goarch.PtrSize
+
+ const bitsIsPowerOfTwo = bits&(bits-1) == 0
+ if bits > ptrBits || (!bitsIsPowerOfTwo && j+bits > ptrBits) {
+
+ // The mask straddles a word boundary: split it across two
+ // adjacent bitmap words.
+ bits0 := ptrBits - j
+ bits1 := bits - bits0
+ dst0 := (*uintptr)(add(dst, (i+0)*goarch.PtrSize))
+ dst1 := (*uintptr)(add(dst, (i+1)*goarch.PtrSize))
+ *dst0 = (*dst0)&(^uintptr(0)>>bits0) | (src << j)
+ *dst1 = (*dst1)&^((1<<bits1)-1) | (src >> bits0)
+ } else {
+
+ // The mask fits in a single bitmap word.
+ dst := (*uintptr)(add(dst, i*goarch.PtrSize))
+ *dst = (*dst)&^(((1<<(min(bits, ptrBits)))-1)<<j) | (src << j)
+ }
+
+ const doubleCheck = false
+ if doubleCheck {
+ writeHeapBitsDoubleCheck(span, x, dataSize, src, src0, i, j, bits, typ)
+ }
+ if doubleCheckHeapSetType {
+ doubleCheckHeapType(x, dataSize, typ, nil, span)
+ }
+ c.scanAlloc += scanSize
+ }
+
+ // Make the zeroed memory and heap bits visible before the pointer to
+ // the object is published.
+ publicationBarrier()
+
+ if writeBarrier.enabled {
+
+ // GC is running: mark the new object so it is allocated black.
+ gcmarknewobject(span, uintptr(x))
+ } else {
+
+ // Keep freeIndexForScan in sync with freeindex for the GC.
+ span.freeIndexForScan = span.freeindex
+ }
+
+ // Heap-profiling sample accounting.
+ c.nextSample -= int64(elemsize)
+ if c.nextSample < 0 || MemProfileRate != c.memProfRate {
+ profilealloc(mp, x, elemsize)
+ }
+ mp.mallocing = 0
+ releasem(mp)
+
+ // With the M released it is now safe to start a GC cycle if the slow
+ // path asked for a trigger check.
+ if checkGCTrigger {
+ if t := (gcTrigger{kind: gcTriggerHeap}); t.test() {
+ gcStart(t)
+ }
+ }
+ // Charge assist credit for the rounding from size up to elemsize.
+ if gcBlackenEnabled != 0 && elemsize != 0 {
+ if assistG := getg().m.curg; assistG != nil {
+ assistG.gcAssistBytes -= int64(elemsize - size)
+ }
+ }
+
+ if debug.malloc {
+ postMallocgcDebug(x, elemsize, typ)
+ }
+ return x
+}
+
+// mallocgcSmallScanNoHeaderSC5 allocates one 48-byte object (size class 5)
+// that contains pointers ("Scan") and whose span keeps heap bits inline
+// rather than a malloc header ("NoHeader"). It is a size-class-specialized
+// copy of the generic small-object mallocgc path. Note: the needzero
+// parameter is not consulted here; zeroing is driven by span.needzero.
+func mallocgcSmallScanNoHeaderSC5(size uintptr, typ *_type, needzero bool) unsafe.Pointer {
+ // Allocation is forbidden during mark termination.
+ if doubleCheckMalloc {
+ if gcphase == _GCmarktermination {
+ throw("mallocgc called with gcphase == _GCmarktermination")
+ }
+ }
+
+ // Lock-rank bookkeeping: this allocation may queue a finalizer.
+ lockRankMayQueueFinalizer()
+
+ // Debug hooks may satisfy the allocation themselves.
+ if debug.malloc {
+ if x := preMallocgcDebug(size, typ); x != nil {
+ return x
+ }
+ }
+
+ // While the GC is marking, charge this allocation against the
+ // goroutine's assist credit so allocation helps pay for scanning.
+ if gcBlackenEnabled != 0 {
+ deductAssistCredit(size)
+ }
+
+ const sizeclass = 5
+
+ const elemsize = 48
+
+ // Pin to the current M (disables preemption) so the per-P mcache can
+ // be used without synchronization; mallocing guards against reentrancy.
+ mp := acquirem()
+ if doubleCheckMalloc {
+ doubleCheckSmallScanNoHeader(size, typ, mp)
+ }
+ mp.mallocing = 1
+
+ checkGCTrigger := false
+ c := getMCache(mp)
+ // Span class: size class shifted into place, low (noscan) bit clear,
+ // i.e. a scannable span.
+ const spc = spanClass(sizeclass<<1) | spanClass(0)
+ span := c.alloc[spc]
+
+ // Inlined nextFreeFast: pop the next free slot from the span's 64-bit
+ // allocCache bitmap when one is available without a cache refill.
+ var nextFreeFastResult gclinkptr
+ if span.allocCache != 0 {
+ theBit := sys.TrailingZeros64(span.allocCache)
+ result := span.freeindex + uint16(theBit)
+ if result < span.nelems {
+ freeidx := result + 1
+ // At a 64-object boundary the allocCache needs a refill;
+ // leave that to the slow path below.
+ if !(freeidx%64 == 0 && freeidx != span.nelems) {
+ span.allocCache >>= uint(theBit + 1)
+ span.freeindex = freeidx
+ span.allocCount++
+ nextFreeFastResult = gclinkptr(uintptr(result)*
+ 48 +
+ span.base())
+ }
+ }
+ }
+ v := nextFreeFastResult
+ if v == 0 {
+ // Slow path: refill the allocCache or obtain a fresh span; may
+ // request a GC-trigger check (performed after releasing the M).
+ v, span, checkGCTrigger = c.nextFree(spc)
+ }
+ x := unsafe.Pointer(v)
+ if span.needzero != 0 {
+ memclrNoHeapPointers(x, elemsize)
+ }
+ if goarch.PtrSize == 8 && sizeclass == 1 {
+
+ // Dead branch for this size class (sizeclass != 1): the constant
+ // condition lets the compiler discard it.
+ c.scanAlloc += 8
+ } else {
+ // Inlined writeHeapBitsSmall: record typ's pointer/scalar mask
+ // in the span's packed heap bitmap for this object.
+ dataSize := size
+ x := uintptr(x)
+
+ if doubleCheckHeapSetType && (!heapBitsInSpan(dataSize) || !heapBitsInSpan(48)) {
+ throw("tried to write heap bits, but no heap bits in span")
+ }
+
+ src0 := readUintptr(getGCMask(typ))
+
+ const elemsize = 48
+
+ scanSize := typ.PtrBytes
+ src := src0
+ if typ.Size_ == goarch.PtrSize {
+ // Single-word element type: every word is a pointer.
+ src = (1 << (dataSize / goarch.PtrSize)) - 1
+ } else {
+
+ if doubleCheckHeapSetType && !asanenabled && dataSize%typ.Size_ != 0 {
+ throw("runtime: (*mspan).writeHeapBitsSmall: dataSize is not a multiple of typ.Size_")
+ }
+ // Replicate the element mask across every element of an
+ // array allocation.
+ for i := typ.Size_; i < dataSize; i += typ.Size_ {
+ src |= src0 << (i / goarch.PtrSize)
+ scanSize += typ.Size_
+ }
+ }
+
+ // Locate the object's bit range in the span's heap bitmap:
+ // bitmap word i, bit offset j, spanning "bits" bits total.
+ dstBase, _ := spanHeapBitsRange(span.base(), pageSize, elemsize)
+ dst := unsafe.Pointer(dstBase)
+ o := (x - span.base()) / goarch.PtrSize
+ i := o / ptrBits
+ j := o % ptrBits
+ const bits uintptr = elemsize / goarch.PtrSize
+
+ const bitsIsPowerOfTwo = bits&(bits-1) == 0
+ if bits > ptrBits || (!bitsIsPowerOfTwo && j+bits > ptrBits) {
+
+ // The mask straddles a word boundary: split it across two
+ // adjacent bitmap words.
+ bits0 := ptrBits - j
+ bits1 := bits - bits0
+ dst0 := (*uintptr)(add(dst, (i+0)*goarch.PtrSize))
+ dst1 := (*uintptr)(add(dst, (i+1)*goarch.PtrSize))
+ *dst0 = (*dst0)&(^uintptr(0)>>bits0) | (src << j)
+ *dst1 = (*dst1)&^((1<<bits1)-1) | (src >> bits0)
+ } else {
+
+ // The mask fits in a single bitmap word.
+ dst := (*uintptr)(add(dst, i*goarch.PtrSize))
+ *dst = (*dst)&^(((1<<(min(bits, ptrBits)))-1)<<j) | (src << j)
+ }
+
+ const doubleCheck = false
+ if doubleCheck {
+ writeHeapBitsDoubleCheck(span, x, dataSize, src, src0, i, j, bits, typ)
+ }
+ if doubleCheckHeapSetType {
+ doubleCheckHeapType(x, dataSize, typ, nil, span)
+ }
+ c.scanAlloc += scanSize
+ }
+
+ // Make the zeroed memory and heap bits visible before the pointer to
+ // the object is published.
+ publicationBarrier()
+
+ if writeBarrier.enabled {
+
+ // GC is running: mark the new object so it is allocated black.
+ gcmarknewobject(span, uintptr(x))
+ } else {
+
+ // Keep freeIndexForScan in sync with freeindex for the GC.
+ span.freeIndexForScan = span.freeindex
+ }
+
+ // Heap-profiling sample accounting.
+ c.nextSample -= int64(elemsize)
+ if c.nextSample < 0 || MemProfileRate != c.memProfRate {
+ profilealloc(mp, x, elemsize)
+ }
+ mp.mallocing = 0
+ releasem(mp)
+
+ // With the M released it is now safe to start a GC cycle if the slow
+ // path asked for a trigger check.
+ if checkGCTrigger {
+ if t := (gcTrigger{kind: gcTriggerHeap}); t.test() {
+ gcStart(t)
+ }
+ }
+ // Charge assist credit for the rounding from size up to elemsize.
+ if gcBlackenEnabled != 0 && elemsize != 0 {
+ if assistG := getg().m.curg; assistG != nil {
+ assistG.gcAssistBytes -= int64(elemsize - size)
+ }
+ }
+
+ if debug.malloc {
+ postMallocgcDebug(x, elemsize, typ)
+ }
+ return x
+}
+
+// mallocgcSmallScanNoHeaderSC6 allocates one 64-byte object (size class 6)
+// that contains pointers ("Scan") and whose span keeps heap bits inline
+// rather than a malloc header ("NoHeader"). It is a size-class-specialized
+// copy of the generic small-object mallocgc path. Note: the needzero
+// parameter is not consulted here; zeroing is driven by span.needzero.
+func mallocgcSmallScanNoHeaderSC6(size uintptr, typ *_type, needzero bool) unsafe.Pointer {
+ // Allocation is forbidden during mark termination.
+ if doubleCheckMalloc {
+ if gcphase == _GCmarktermination {
+ throw("mallocgc called with gcphase == _GCmarktermination")
+ }
+ }
+
+ // Lock-rank bookkeeping: this allocation may queue a finalizer.
+ lockRankMayQueueFinalizer()
+
+ // Debug hooks may satisfy the allocation themselves.
+ if debug.malloc {
+ if x := preMallocgcDebug(size, typ); x != nil {
+ return x
+ }
+ }
+
+ // While the GC is marking, charge this allocation against the
+ // goroutine's assist credit so allocation helps pay for scanning.
+ if gcBlackenEnabled != 0 {
+ deductAssistCredit(size)
+ }
+
+ const sizeclass = 6
+
+ const elemsize = 64
+
+ // Pin to the current M (disables preemption) so the per-P mcache can
+ // be used without synchronization; mallocing guards against reentrancy.
+ mp := acquirem()
+ if doubleCheckMalloc {
+ doubleCheckSmallScanNoHeader(size, typ, mp)
+ }
+ mp.mallocing = 1
+
+ checkGCTrigger := false
+ c := getMCache(mp)
+ // Span class: size class shifted into place, low (noscan) bit clear,
+ // i.e. a scannable span.
+ const spc = spanClass(sizeclass<<1) | spanClass(0)
+ span := c.alloc[spc]
+
+ // Inlined nextFreeFast: pop the next free slot from the span's 64-bit
+ // allocCache bitmap when one is available without a cache refill.
+ var nextFreeFastResult gclinkptr
+ if span.allocCache != 0 {
+ theBit := sys.TrailingZeros64(span.allocCache)
+ result := span.freeindex + uint16(theBit)
+ if result < span.nelems {
+ freeidx := result + 1
+ // At a 64-object boundary the allocCache needs a refill;
+ // leave that to the slow path below.
+ if !(freeidx%64 == 0 && freeidx != span.nelems) {
+ span.allocCache >>= uint(theBit + 1)
+ span.freeindex = freeidx
+ span.allocCount++
+ nextFreeFastResult = gclinkptr(uintptr(result)*
+ 64 +
+ span.base())
+ }
+ }
+ }
+ v := nextFreeFastResult
+ if v == 0 {
+ // Slow path: refill the allocCache or obtain a fresh span; may
+ // request a GC-trigger check (performed after releasing the M).
+ v, span, checkGCTrigger = c.nextFree(spc)
+ }
+ x := unsafe.Pointer(v)
+ if span.needzero != 0 {
+ memclrNoHeapPointers(x, elemsize)
+ }
+ if goarch.PtrSize == 8 && sizeclass == 1 {
+
+ // Dead branch for this size class (sizeclass != 1): the constant
+ // condition lets the compiler discard it.
+ c.scanAlloc += 8
+ } else {
+ // Inlined writeHeapBitsSmall: record typ's pointer/scalar mask
+ // in the span's packed heap bitmap for this object.
+ dataSize := size
+ x := uintptr(x)
+
+ if doubleCheckHeapSetType && (!heapBitsInSpan(dataSize) || !heapBitsInSpan(64)) {
+ throw("tried to write heap bits, but no heap bits in span")
+ }
+
+ src0 := readUintptr(getGCMask(typ))
+
+ const elemsize = 64
+
+ scanSize := typ.PtrBytes
+ src := src0
+ if typ.Size_ == goarch.PtrSize {
+ // Single-word element type: every word is a pointer.
+ src = (1 << (dataSize / goarch.PtrSize)) - 1
+ } else {
+
+ if doubleCheckHeapSetType && !asanenabled && dataSize%typ.Size_ != 0 {
+ throw("runtime: (*mspan).writeHeapBitsSmall: dataSize is not a multiple of typ.Size_")
+ }
+ // Replicate the element mask across every element of an
+ // array allocation.
+ for i := typ.Size_; i < dataSize; i += typ.Size_ {
+ src |= src0 << (i / goarch.PtrSize)
+ scanSize += typ.Size_
+ }
+ }
+
+ // Locate the object's bit range in the span's heap bitmap:
+ // bitmap word i, bit offset j, spanning "bits" bits total.
+ dstBase, _ := spanHeapBitsRange(span.base(), pageSize, elemsize)
+ dst := unsafe.Pointer(dstBase)
+ o := (x - span.base()) / goarch.PtrSize
+ i := o / ptrBits
+ j := o % ptrBits
+ const bits uintptr = elemsize / goarch.PtrSize
+
+ const bitsIsPowerOfTwo = bits&(bits-1) == 0
+ if bits > ptrBits || (!bitsIsPowerOfTwo && j+bits > ptrBits) {
+
+ // The mask straddles a word boundary: split it across two
+ // adjacent bitmap words.
+ bits0 := ptrBits - j
+ bits1 := bits - bits0
+ dst0 := (*uintptr)(add(dst, (i+0)*goarch.PtrSize))
+ dst1 := (*uintptr)(add(dst, (i+1)*goarch.PtrSize))
+ *dst0 = (*dst0)&(^uintptr(0)>>bits0) | (src << j)
+ *dst1 = (*dst1)&^((1<<bits1)-1) | (src >> bits0)
+ } else {
+
+ // The mask fits in a single bitmap word.
+ dst := (*uintptr)(add(dst, i*goarch.PtrSize))
+ *dst = (*dst)&^(((1<<(min(bits, ptrBits)))-1)<<j) | (src << j)
+ }
+
+ const doubleCheck = false
+ if doubleCheck {
+ writeHeapBitsDoubleCheck(span, x, dataSize, src, src0, i, j, bits, typ)
+ }
+ if doubleCheckHeapSetType {
+ doubleCheckHeapType(x, dataSize, typ, nil, span)
+ }
+ c.scanAlloc += scanSize
+ }
+
+ // Make the zeroed memory and heap bits visible before the pointer to
+ // the object is published.
+ publicationBarrier()
+
+ if writeBarrier.enabled {
+
+ // GC is running: mark the new object so it is allocated black.
+ gcmarknewobject(span, uintptr(x))
+ } else {
+
+ // Keep freeIndexForScan in sync with freeindex for the GC.
+ span.freeIndexForScan = span.freeindex
+ }
+
+ // Heap-profiling sample accounting.
+ c.nextSample -= int64(elemsize)
+ if c.nextSample < 0 || MemProfileRate != c.memProfRate {
+ profilealloc(mp, x, elemsize)
+ }
+ mp.mallocing = 0
+ releasem(mp)
+
+ // With the M released it is now safe to start a GC cycle if the slow
+ // path asked for a trigger check.
+ if checkGCTrigger {
+ if t := (gcTrigger{kind: gcTriggerHeap}); t.test() {
+ gcStart(t)
+ }
+ }
+ // Charge assist credit for the rounding from size up to elemsize.
+ if gcBlackenEnabled != 0 && elemsize != 0 {
+ if assistG := getg().m.curg; assistG != nil {
+ assistG.gcAssistBytes -= int64(elemsize - size)
+ }
+ }
+
+ if debug.malloc {
+ postMallocgcDebug(x, elemsize, typ)
+ }
+ return x
+}
+
+// mallocgcSmallScanNoHeaderSC7 allocates one 80-byte object (size class 7)
+// that contains pointers ("Scan") and whose span keeps heap bits inline
+// rather than a malloc header ("NoHeader"). It is a size-class-specialized
+// copy of the generic small-object mallocgc path. Note: the needzero
+// parameter is not consulted here; zeroing is driven by span.needzero.
+func mallocgcSmallScanNoHeaderSC7(size uintptr, typ *_type, needzero bool) unsafe.Pointer {
+ // Allocation is forbidden during mark termination.
+ if doubleCheckMalloc {
+ if gcphase == _GCmarktermination {
+ throw("mallocgc called with gcphase == _GCmarktermination")
+ }
+ }
+
+ // Lock-rank bookkeeping: this allocation may queue a finalizer.
+ lockRankMayQueueFinalizer()
+
+ // Debug hooks may satisfy the allocation themselves.
+ if debug.malloc {
+ if x := preMallocgcDebug(size, typ); x != nil {
+ return x
+ }
+ }
+
+ // While the GC is marking, charge this allocation against the
+ // goroutine's assist credit so allocation helps pay for scanning.
+ if gcBlackenEnabled != 0 {
+ deductAssistCredit(size)
+ }
+
+ const sizeclass = 7
+
+ const elemsize = 80
+
+ // Pin to the current M (disables preemption) so the per-P mcache can
+ // be used without synchronization; mallocing guards against reentrancy.
+ mp := acquirem()
+ if doubleCheckMalloc {
+ doubleCheckSmallScanNoHeader(size, typ, mp)
+ }
+ mp.mallocing = 1
+
+ checkGCTrigger := false
+ c := getMCache(mp)
+ // Span class: size class shifted into place, low (noscan) bit clear,
+ // i.e. a scannable span.
+ const spc = spanClass(sizeclass<<1) | spanClass(0)
+ span := c.alloc[spc]
+
+ // Inlined nextFreeFast: pop the next free slot from the span's 64-bit
+ // allocCache bitmap when one is available without a cache refill.
+ var nextFreeFastResult gclinkptr
+ if span.allocCache != 0 {
+ theBit := sys.TrailingZeros64(span.allocCache)
+ result := span.freeindex + uint16(theBit)
+ if result < span.nelems {
+ freeidx := result + 1
+ // At a 64-object boundary the allocCache needs a refill;
+ // leave that to the slow path below.
+ if !(freeidx%64 == 0 && freeidx != span.nelems) {
+ span.allocCache >>= uint(theBit + 1)
+ span.freeindex = freeidx
+ span.allocCount++
+ nextFreeFastResult = gclinkptr(uintptr(result)*
+ 80 +
+ span.base())
+ }
+ }
+ }
+ v := nextFreeFastResult
+ if v == 0 {
+ // Slow path: refill the allocCache or obtain a fresh span; may
+ // request a GC-trigger check (performed after releasing the M).
+ v, span, checkGCTrigger = c.nextFree(spc)
+ }
+ x := unsafe.Pointer(v)
+ if span.needzero != 0 {
+ memclrNoHeapPointers(x, elemsize)
+ }
+ if goarch.PtrSize == 8 && sizeclass == 1 {
+
+ // Dead branch for this size class (sizeclass != 1): the constant
+ // condition lets the compiler discard it.
+ c.scanAlloc += 8
+ } else {
+ // Inlined writeHeapBitsSmall: record typ's pointer/scalar mask
+ // in the span's packed heap bitmap for this object.
+ dataSize := size
+ x := uintptr(x)
+
+ if doubleCheckHeapSetType && (!heapBitsInSpan(dataSize) || !heapBitsInSpan(80)) {
+ throw("tried to write heap bits, but no heap bits in span")
+ }
+
+ src0 := readUintptr(getGCMask(typ))
+
+ const elemsize = 80
+
+ scanSize := typ.PtrBytes
+ src := src0
+ if typ.Size_ == goarch.PtrSize {
+ // Single-word element type: every word is a pointer.
+ src = (1 << (dataSize / goarch.PtrSize)) - 1
+ } else {
+
+ if doubleCheckHeapSetType && !asanenabled && dataSize%typ.Size_ != 0 {
+ throw("runtime: (*mspan).writeHeapBitsSmall: dataSize is not a multiple of typ.Size_")
+ }
+ // Replicate the element mask across every element of an
+ // array allocation.
+ for i := typ.Size_; i < dataSize; i += typ.Size_ {
+ src |= src0 << (i / goarch.PtrSize)
+ scanSize += typ.Size_
+ }
+ }
+
+ // Locate the object's bit range in the span's heap bitmap:
+ // bitmap word i, bit offset j, spanning "bits" bits total.
+ dstBase, _ := spanHeapBitsRange(span.base(), pageSize, elemsize)
+ dst := unsafe.Pointer(dstBase)
+ o := (x - span.base()) / goarch.PtrSize
+ i := o / ptrBits
+ j := o % ptrBits
+ const bits uintptr = elemsize / goarch.PtrSize
+
+ const bitsIsPowerOfTwo = bits&(bits-1) == 0
+ if bits > ptrBits || (!bitsIsPowerOfTwo && j+bits > ptrBits) {
+
+ // The mask straddles a word boundary: split it across two
+ // adjacent bitmap words.
+ bits0 := ptrBits - j
+ bits1 := bits - bits0
+ dst0 := (*uintptr)(add(dst, (i+0)*goarch.PtrSize))
+ dst1 := (*uintptr)(add(dst, (i+1)*goarch.PtrSize))
+ *dst0 = (*dst0)&(^uintptr(0)>>bits0) | (src << j)
+ *dst1 = (*dst1)&^((1<<bits1)-1) | (src >> bits0)
+ } else {
+
+ // The mask fits in a single bitmap word.
+ dst := (*uintptr)(add(dst, i*goarch.PtrSize))
+ *dst = (*dst)&^(((1<<(min(bits, ptrBits)))-1)<<j) | (src << j)
+ }
+
+ const doubleCheck = false
+ if doubleCheck {
+ writeHeapBitsDoubleCheck(span, x, dataSize, src, src0, i, j, bits, typ)
+ }
+ if doubleCheckHeapSetType {
+ doubleCheckHeapType(x, dataSize, typ, nil, span)
+ }
+ c.scanAlloc += scanSize
+ }
+
+ // Make the zeroed memory and heap bits visible before the pointer to
+ // the object is published.
+ publicationBarrier()
+
+ if writeBarrier.enabled {
+
+ // GC is running: mark the new object so it is allocated black.
+ gcmarknewobject(span, uintptr(x))
+ } else {
+
+ // Keep freeIndexForScan in sync with freeindex for the GC.
+ span.freeIndexForScan = span.freeindex
+ }
+
+ // Heap-profiling sample accounting.
+ c.nextSample -= int64(elemsize)
+ if c.nextSample < 0 || MemProfileRate != c.memProfRate {
+ profilealloc(mp, x, elemsize)
+ }
+ mp.mallocing = 0
+ releasem(mp)
+
+ // With the M released it is now safe to start a GC cycle if the slow
+ // path asked for a trigger check.
+ if checkGCTrigger {
+ if t := (gcTrigger{kind: gcTriggerHeap}); t.test() {
+ gcStart(t)
+ }
+ }
+ // Charge assist credit for the rounding from size up to elemsize.
+ if gcBlackenEnabled != 0 && elemsize != 0 {
+ if assistG := getg().m.curg; assistG != nil {
+ assistG.gcAssistBytes -= int64(elemsize - size)
+ }
+ }
+
+ if debug.malloc {
+ postMallocgcDebug(x, elemsize, typ)
+ }
+ return x
+}
+
+// mallocgcSmallScanNoHeaderSC8 allocates one 96-byte object (size class 8)
+// that contains pointers ("Scan") and whose span keeps heap bits inline
+// rather than a malloc header ("NoHeader"). It is a size-class-specialized
+// copy of the generic small-object mallocgc path. Note: the needzero
+// parameter is not consulted here; zeroing is driven by span.needzero.
+func mallocgcSmallScanNoHeaderSC8(size uintptr, typ *_type, needzero bool) unsafe.Pointer {
+ // Allocation is forbidden during mark termination.
+ if doubleCheckMalloc {
+ if gcphase == _GCmarktermination {
+ throw("mallocgc called with gcphase == _GCmarktermination")
+ }
+ }
+
+ // Lock-rank bookkeeping: this allocation may queue a finalizer.
+ lockRankMayQueueFinalizer()
+
+ // Debug hooks may satisfy the allocation themselves.
+ if debug.malloc {
+ if x := preMallocgcDebug(size, typ); x != nil {
+ return x
+ }
+ }
+
+ // While the GC is marking, charge this allocation against the
+ // goroutine's assist credit so allocation helps pay for scanning.
+ if gcBlackenEnabled != 0 {
+ deductAssistCredit(size)
+ }
+
+ const sizeclass = 8
+
+ const elemsize = 96
+
+ // Pin to the current M (disables preemption) so the per-P mcache can
+ // be used without synchronization; mallocing guards against reentrancy.
+ mp := acquirem()
+ if doubleCheckMalloc {
+ doubleCheckSmallScanNoHeader(size, typ, mp)
+ }
+ mp.mallocing = 1
+
+ checkGCTrigger := false
+ c := getMCache(mp)
+ // Span class: size class shifted into place, low (noscan) bit clear,
+ // i.e. a scannable span.
+ const spc = spanClass(sizeclass<<1) | spanClass(0)
+ span := c.alloc[spc]
+
+ // Inlined nextFreeFast: pop the next free slot from the span's 64-bit
+ // allocCache bitmap when one is available without a cache refill.
+ var nextFreeFastResult gclinkptr
+ if span.allocCache != 0 {
+ theBit := sys.TrailingZeros64(span.allocCache)
+ result := span.freeindex + uint16(theBit)
+ if result < span.nelems {
+ freeidx := result + 1
+ // At a 64-object boundary the allocCache needs a refill;
+ // leave that to the slow path below.
+ if !(freeidx%64 == 0 && freeidx != span.nelems) {
+ span.allocCache >>= uint(theBit + 1)
+ span.freeindex = freeidx
+ span.allocCount++
+ nextFreeFastResult = gclinkptr(uintptr(result)*
+ 96 +
+ span.base())
+ }
+ }
+ }
+ v := nextFreeFastResult
+ if v == 0 {
+ // Slow path: refill the allocCache or obtain a fresh span; may
+ // request a GC-trigger check (performed after releasing the M).
+ v, span, checkGCTrigger = c.nextFree(spc)
+ }
+ x := unsafe.Pointer(v)
+ if span.needzero != 0 {
+ memclrNoHeapPointers(x, elemsize)
+ }
+ if goarch.PtrSize == 8 && sizeclass == 1 {
+
+ // Dead branch for this size class (sizeclass != 1): the constant
+ // condition lets the compiler discard it.
+ c.scanAlloc += 8
+ } else {
+ // Inlined writeHeapBitsSmall: record typ's pointer/scalar mask
+ // in the span's packed heap bitmap for this object.
+ dataSize := size
+ x := uintptr(x)
+
+ if doubleCheckHeapSetType && (!heapBitsInSpan(dataSize) || !heapBitsInSpan(96)) {
+ throw("tried to write heap bits, but no heap bits in span")
+ }
+
+ src0 := readUintptr(getGCMask(typ))
+
+ const elemsize = 96
+
+ scanSize := typ.PtrBytes
+ src := src0
+ if typ.Size_ == goarch.PtrSize {
+ // Single-word element type: every word is a pointer.
+ src = (1 << (dataSize / goarch.PtrSize)) - 1
+ } else {
+
+ if doubleCheckHeapSetType && !asanenabled && dataSize%typ.Size_ != 0 {
+ throw("runtime: (*mspan).writeHeapBitsSmall: dataSize is not a multiple of typ.Size_")
+ }
+ // Replicate the element mask across every element of an
+ // array allocation.
+ for i := typ.Size_; i < dataSize; i += typ.Size_ {
+ src |= src0 << (i / goarch.PtrSize)
+ scanSize += typ.Size_
+ }
+ }
+
+ // Locate the object's bit range in the span's heap bitmap:
+ // bitmap word i, bit offset j, spanning "bits" bits total.
+ dstBase, _ := spanHeapBitsRange(span.base(), pageSize, elemsize)
+ dst := unsafe.Pointer(dstBase)
+ o := (x - span.base()) / goarch.PtrSize
+ i := o / ptrBits
+ j := o % ptrBits
+ const bits uintptr = elemsize / goarch.PtrSize
+
+ const bitsIsPowerOfTwo = bits&(bits-1) == 0
+ if bits > ptrBits || (!bitsIsPowerOfTwo && j+bits > ptrBits) {
+
+ // The mask straddles a word boundary: split it across two
+ // adjacent bitmap words.
+ bits0 := ptrBits - j
+ bits1 := bits - bits0
+ dst0 := (*uintptr)(add(dst, (i+0)*goarch.PtrSize))
+ dst1 := (*uintptr)(add(dst, (i+1)*goarch.PtrSize))
+ *dst0 = (*dst0)&(^uintptr(0)>>bits0) | (src << j)
+ *dst1 = (*dst1)&^((1<<bits1)-1) | (src >> bits0)
+ } else {
+
+ // The mask fits in a single bitmap word.
+ dst := (*uintptr)(add(dst, i*goarch.PtrSize))
+ *dst = (*dst)&^(((1<<(min(bits, ptrBits)))-1)<<j) | (src << j)
+ }
+
+ const doubleCheck = false
+ if doubleCheck {
+ writeHeapBitsDoubleCheck(span, x, dataSize, src, src0, i, j, bits, typ)
+ }
+ if doubleCheckHeapSetType {
+ doubleCheckHeapType(x, dataSize, typ, nil, span)
+ }
+ c.scanAlloc += scanSize
+ }
+
+ // Make the zeroed memory and heap bits visible before the pointer to
+ // the object is published.
+ publicationBarrier()
+
+ if writeBarrier.enabled {
+
+ // GC is running: mark the new object so it is allocated black.
+ gcmarknewobject(span, uintptr(x))
+ } else {
+
+ // Keep freeIndexForScan in sync with freeindex for the GC.
+ span.freeIndexForScan = span.freeindex
+ }
+
+ // Heap-profiling sample accounting.
+ c.nextSample -= int64(elemsize)
+ if c.nextSample < 0 || MemProfileRate != c.memProfRate {
+ profilealloc(mp, x, elemsize)
+ }
+ mp.mallocing = 0
+ releasem(mp)
+
+ // With the M released it is now safe to start a GC cycle if the slow
+ // path asked for a trigger check.
+ if checkGCTrigger {
+ if t := (gcTrigger{kind: gcTriggerHeap}); t.test() {
+ gcStart(t)
+ }
+ }
+ // Charge assist credit for the rounding from size up to elemsize.
+ if gcBlackenEnabled != 0 && elemsize != 0 {
+ if assistG := getg().m.curg; assistG != nil {
+ assistG.gcAssistBytes -= int64(elemsize - size)
+ }
+ }
+
+ if debug.malloc {
+ postMallocgcDebug(x, elemsize, typ)
+ }
+ return x
+}
+
+// mallocgcSmallScanNoHeaderSC9 allocates a small scannable
+// (pointer-bearing) object with no malloc header, specialized to size
+// class 9 (element size 112 bytes). Generated code: sizeclass and
+// elemsize are compile-time constants so the allocation fast path
+// folds away size computations. needzero is accepted for signature
+// uniformity with the other specializations; zeroing here is driven by
+// span.needzero. Returns a pointer to the new, zeroed, typed object.
+func mallocgcSmallScanNoHeaderSC9(size uintptr, typ *_type, needzero bool) unsafe.Pointer {
+ if doubleCheckMalloc {
+ if gcphase == _GCmarktermination {
+ throw("mallocgc called with gcphase == _GCmarktermination")
+ }
+ }
+
+ lockRankMayQueueFinalizer()
+
+ // Debug hooks may satisfy (and short-circuit) the allocation.
+ if debug.malloc {
+ if x := preMallocgcDebug(size, typ); x != nil {
+ return x
+ }
+ }
+
+ // Charge this allocation against the mutator's GC assist credit.
+ if gcBlackenEnabled != 0 {
+ deductAssistCredit(size)
+ }
+
+ const sizeclass = 9
+
+ const elemsize = 112
+
+ // Pin to the M so the mcache cannot change under us.
+ mp := acquirem()
+ if doubleCheckMalloc {
+ doubleCheckSmallScanNoHeader(size, typ, mp)
+ }
+ mp.mallocing = 1
+
+ checkGCTrigger := false
+ c := getMCache(mp)
+ // Scannable span class: (sizeclass<<1) with the noscan bit clear.
+ const spc = spanClass(sizeclass<<1) | spanClass(0)
+ span := c.alloc[spc]
+
+ // Inlined nextFreeFast: pop the next free slot out of the span's
+ // 64-bit allocCache bitmap, if one is available.
+ var nextFreeFastResult gclinkptr
+ if span.allocCache != 0 {
+ theBit := sys.TrailingZeros64(span.allocCache)
+ result := span.freeindex + uint16(theBit)
+ if result < span.nelems {
+ freeidx := result + 1
+ // Decline the fast path when freeindex would cross a 64-slot
+ // boundary mid-span (the cache would need a refill).
+ if !(freeidx%64 == 0 && freeidx != span.nelems) {
+ span.allocCache >>= uint(theBit + 1)
+ span.freeindex = freeidx
+ span.allocCount++
+ nextFreeFastResult = gclinkptr(uintptr(result)*
+ 112 +
+ span.base())
+ }
+ }
+ }
+ v := nextFreeFastResult
+ if v == 0 {
+ // Slow path: refill the cache, possibly acquiring a new span.
+ v, span, checkGCTrigger = c.nextFree(spc)
+ }
+ x := unsafe.Pointer(v)
+ if span.needzero != 0 {
+ memclrNoHeapPointers(x, elemsize)
+ }
+ if goarch.PtrSize == 8 && sizeclass == 1 {
+
+ // Statically dead here (sizeclass == 9); kept by the generator.
+ c.scanAlloc += 8
+ } else {
+ // Inlined writeHeapBitsSmall: record typ's pointer bitmap for
+ // this object in the span's heap bits.
+ dataSize := size
+ x := uintptr(x) // shadow: bitmap arithmetic below wants a uintptr
+
+ if doubleCheckHeapSetType && (!heapBitsInSpan(dataSize) || !heapBitsInSpan(112)) {
+ throw("tried to write heap bits, but no heap bits in span")
+ }
+
+ src0 := readUintptr(getGCMask(typ))
+
+ const elemsize = 112 // shadows the outer const; same value
+
+ scanSize := typ.PtrBytes
+ src := src0
+ if typ.Size_ == goarch.PtrSize {
+ // Single-word elements: every word of the object is a pointer.
+ src = (1 << (dataSize / goarch.PtrSize)) - 1
+ } else {
+
+ if doubleCheckHeapSetType && !asanenabled && dataSize%typ.Size_ != 0 {
+ throw("runtime: (*mspan).writeHeapBitsSmall: dataSize is not a multiple of typ.Size_")
+ }
+ // Replicate the one-element mask across every element in the
+ // allocation.
+ for i := typ.Size_; i < dataSize; i += typ.Size_ {
+ src |= src0 << (i / goarch.PtrSize)
+ scanSize += typ.Size_
+ }
+ }
+
+ // Locate the destination bit range in the span's heap bitmap.
+ dstBase, _ := spanHeapBitsRange(span.base(), pageSize, elemsize)
+ dst := unsafe.Pointer(dstBase)
+ o := (x - span.base()) / goarch.PtrSize
+ i := o / ptrBits
+ j := o % ptrBits
+ const bits uintptr = elemsize / goarch.PtrSize
+
+ const bitsIsPowerOfTwo = bits&(bits-1) == 0
+ if bits > ptrBits || (!bitsIsPowerOfTwo && j+bits > ptrBits) {
+
+ // The mask straddles two bitmap words; split the write.
+ bits0 := ptrBits - j
+ bits1 := bits - bits0
+ dst0 := (*uintptr)(add(dst, (i+0)*goarch.PtrSize))
+ dst1 := (*uintptr)(add(dst, (i+1)*goarch.PtrSize))
+ *dst0 = (*dst0)&(^uintptr(0)>>bits0) | (src << j)
+ *dst1 = (*dst1)&^((1<<bits1)-1) | (src >> bits0)
+ } else {
+
+ // The mask fits within a single bitmap word.
+ dst := (*uintptr)(add(dst, i*goarch.PtrSize))
+ *dst = (*dst)&^(((1<<(min(bits, ptrBits)))-1)<<j) | (src << j)
+ }
+
+ const doubleCheck = false
+ if doubleCheck {
+ writeHeapBitsDoubleCheck(span, x, dataSize, src, src0, i, j, bits, typ)
+ }
+ if doubleCheckHeapSetType {
+ doubleCheckHeapType(x, dataSize, typ, nil, span)
+ }
+ c.scanAlloc += scanSize
+ }
+
+ // Make the zeroing and heap-bit stores above visible before the
+ // pointer to the object can be published.
+ publicationBarrier()
+
+ if writeBarrier.enabled {
+
+ // Allocate black: mark the new object during the GC mark phase.
+ gcmarknewobject(span, uintptr(x))
+ } else {
+
+ span.freeIndexForScan = span.freeindex
+ }
+
+ // Heap-profiling sample accounting.
+ c.nextSample -= int64(elemsize)
+ if c.nextSample < 0 || MemProfileRate != c.memProfRate {
+ profilealloc(mp, x, elemsize)
+ }
+ mp.mallocing = 0
+ releasem(mp)
+
+ if checkGCTrigger {
+ if t := (gcTrigger{kind: gcTriggerHeap}); t.test() {
+ gcStart(t)
+ }
+ }
+ // Charge assist credit for the size-class rounding waste
+ // (elemsize - size) not covered by deductAssistCredit above.
+ if gcBlackenEnabled != 0 && elemsize != 0 {
+ if assistG := getg().m.curg; assistG != nil {
+ assistG.gcAssistBytes -= int64(elemsize - size)
+ }
+ }
+
+ if debug.malloc {
+ postMallocgcDebug(x, elemsize, typ)
+ }
+ return x
+}
+
+// mallocgcSmallScanNoHeaderSC10 allocates a small scannable
+// (pointer-bearing) object with no malloc header, specialized to size
+// class 10 (element size 128 bytes). Generated code: sizeclass and
+// elemsize are compile-time constants so the allocation fast path
+// folds away size computations. needzero is accepted for signature
+// uniformity with the other specializations; zeroing here is driven by
+// span.needzero. Returns a pointer to the new, zeroed, typed object.
+func mallocgcSmallScanNoHeaderSC10(size uintptr, typ *_type, needzero bool) unsafe.Pointer {
+ if doubleCheckMalloc {
+ if gcphase == _GCmarktermination {
+ throw("mallocgc called with gcphase == _GCmarktermination")
+ }
+ }
+
+ lockRankMayQueueFinalizer()
+
+ // Debug hooks may satisfy (and short-circuit) the allocation.
+ if debug.malloc {
+ if x := preMallocgcDebug(size, typ); x != nil {
+ return x
+ }
+ }
+
+ // Charge this allocation against the mutator's GC assist credit.
+ if gcBlackenEnabled != 0 {
+ deductAssistCredit(size)
+ }
+
+ const sizeclass = 10
+
+ const elemsize = 128
+
+ // Pin to the M so the mcache cannot change under us.
+ mp := acquirem()
+ if doubleCheckMalloc {
+ doubleCheckSmallScanNoHeader(size, typ, mp)
+ }
+ mp.mallocing = 1
+
+ checkGCTrigger := false
+ c := getMCache(mp)
+ // Scannable span class: (sizeclass<<1) with the noscan bit clear.
+ const spc = spanClass(sizeclass<<1) | spanClass(0)
+ span := c.alloc[spc]
+
+ // Inlined nextFreeFast: pop the next free slot out of the span's
+ // 64-bit allocCache bitmap, if one is available.
+ var nextFreeFastResult gclinkptr
+ if span.allocCache != 0 {
+ theBit := sys.TrailingZeros64(span.allocCache)
+ result := span.freeindex + uint16(theBit)
+ if result < span.nelems {
+ freeidx := result + 1
+ // Decline the fast path when freeindex would cross a 64-slot
+ // boundary mid-span (the cache would need a refill).
+ if !(freeidx%64 == 0 && freeidx != span.nelems) {
+ span.allocCache >>= uint(theBit + 1)
+ span.freeindex = freeidx
+ span.allocCount++
+ nextFreeFastResult = gclinkptr(uintptr(result)*
+ 128 +
+ span.base())
+ }
+ }
+ }
+ v := nextFreeFastResult
+ if v == 0 {
+ // Slow path: refill the cache, possibly acquiring a new span.
+ v, span, checkGCTrigger = c.nextFree(spc)
+ }
+ x := unsafe.Pointer(v)
+ if span.needzero != 0 {
+ memclrNoHeapPointers(x, elemsize)
+ }
+ if goarch.PtrSize == 8 && sizeclass == 1 {
+
+ // Statically dead here (sizeclass == 10); kept by the generator.
+ c.scanAlloc += 8
+ } else {
+ // Inlined writeHeapBitsSmall: record typ's pointer bitmap for
+ // this object in the span's heap bits.
+ dataSize := size
+ x := uintptr(x) // shadow: bitmap arithmetic below wants a uintptr
+
+ if doubleCheckHeapSetType && (!heapBitsInSpan(dataSize) || !heapBitsInSpan(128)) {
+ throw("tried to write heap bits, but no heap bits in span")
+ }
+
+ src0 := readUintptr(getGCMask(typ))
+
+ const elemsize = 128 // shadows the outer const; same value
+
+ scanSize := typ.PtrBytes
+ src := src0
+ if typ.Size_ == goarch.PtrSize {
+ // Single-word elements: every word of the object is a pointer.
+ src = (1 << (dataSize / goarch.PtrSize)) - 1
+ } else {
+
+ if doubleCheckHeapSetType && !asanenabled && dataSize%typ.Size_ != 0 {
+ throw("runtime: (*mspan).writeHeapBitsSmall: dataSize is not a multiple of typ.Size_")
+ }
+ // Replicate the one-element mask across every element in the
+ // allocation.
+ for i := typ.Size_; i < dataSize; i += typ.Size_ {
+ src |= src0 << (i / goarch.PtrSize)
+ scanSize += typ.Size_
+ }
+ }
+
+ // Locate the destination bit range in the span's heap bitmap.
+ dstBase, _ := spanHeapBitsRange(span.base(), pageSize, elemsize)
+ dst := unsafe.Pointer(dstBase)
+ o := (x - span.base()) / goarch.PtrSize
+ i := o / ptrBits
+ j := o % ptrBits
+ const bits uintptr = elemsize / goarch.PtrSize
+
+ const bitsIsPowerOfTwo = bits&(bits-1) == 0
+ if bits > ptrBits || (!bitsIsPowerOfTwo && j+bits > ptrBits) {
+
+ // The mask straddles two bitmap words; split the write.
+ bits0 := ptrBits - j
+ bits1 := bits - bits0
+ dst0 := (*uintptr)(add(dst, (i+0)*goarch.PtrSize))
+ dst1 := (*uintptr)(add(dst, (i+1)*goarch.PtrSize))
+ *dst0 = (*dst0)&(^uintptr(0)>>bits0) | (src << j)
+ *dst1 = (*dst1)&^((1<<bits1)-1) | (src >> bits0)
+ } else {
+
+ // The mask fits within a single bitmap word.
+ dst := (*uintptr)(add(dst, i*goarch.PtrSize))
+ *dst = (*dst)&^(((1<<(min(bits, ptrBits)))-1)<<j) | (src << j)
+ }
+
+ const doubleCheck = false
+ if doubleCheck {
+ writeHeapBitsDoubleCheck(span, x, dataSize, src, src0, i, j, bits, typ)
+ }
+ if doubleCheckHeapSetType {
+ doubleCheckHeapType(x, dataSize, typ, nil, span)
+ }
+ c.scanAlloc += scanSize
+ }
+
+ // Make the zeroing and heap-bit stores above visible before the
+ // pointer to the object can be published.
+ publicationBarrier()
+
+ if writeBarrier.enabled {
+
+ // Allocate black: mark the new object during the GC mark phase.
+ gcmarknewobject(span, uintptr(x))
+ } else {
+
+ span.freeIndexForScan = span.freeindex
+ }
+
+ // Heap-profiling sample accounting.
+ c.nextSample -= int64(elemsize)
+ if c.nextSample < 0 || MemProfileRate != c.memProfRate {
+ profilealloc(mp, x, elemsize)
+ }
+ mp.mallocing = 0
+ releasem(mp)
+
+ if checkGCTrigger {
+ if t := (gcTrigger{kind: gcTriggerHeap}); t.test() {
+ gcStart(t)
+ }
+ }
+ // Charge assist credit for the size-class rounding waste
+ // (elemsize - size) not covered by deductAssistCredit above.
+ if gcBlackenEnabled != 0 && elemsize != 0 {
+ if assistG := getg().m.curg; assistG != nil {
+ assistG.gcAssistBytes -= int64(elemsize - size)
+ }
+ }
+
+ if debug.malloc {
+ postMallocgcDebug(x, elemsize, typ)
+ }
+ return x
+}
+
+// mallocgcSmallScanNoHeaderSC11 allocates a small scannable
+// (pointer-bearing) object with no malloc header, specialized to size
+// class 11 (element size 144 bytes). Generated code: sizeclass and
+// elemsize are compile-time constants so the allocation fast path
+// folds away size computations. needzero is accepted for signature
+// uniformity with the other specializations; zeroing here is driven by
+// span.needzero. Returns a pointer to the new, zeroed, typed object.
+func mallocgcSmallScanNoHeaderSC11(size uintptr, typ *_type, needzero bool) unsafe.Pointer {
+ if doubleCheckMalloc {
+ if gcphase == _GCmarktermination {
+ throw("mallocgc called with gcphase == _GCmarktermination")
+ }
+ }
+
+ lockRankMayQueueFinalizer()
+
+ // Debug hooks may satisfy (and short-circuit) the allocation.
+ if debug.malloc {
+ if x := preMallocgcDebug(size, typ); x != nil {
+ return x
+ }
+ }
+
+ // Charge this allocation against the mutator's GC assist credit.
+ if gcBlackenEnabled != 0 {
+ deductAssistCredit(size)
+ }
+
+ const sizeclass = 11
+
+ const elemsize = 144
+
+ // Pin to the M so the mcache cannot change under us.
+ mp := acquirem()
+ if doubleCheckMalloc {
+ doubleCheckSmallScanNoHeader(size, typ, mp)
+ }
+ mp.mallocing = 1
+
+ checkGCTrigger := false
+ c := getMCache(mp)
+ // Scannable span class: (sizeclass<<1) with the noscan bit clear.
+ const spc = spanClass(sizeclass<<1) | spanClass(0)
+ span := c.alloc[spc]
+
+ // Inlined nextFreeFast: pop the next free slot out of the span's
+ // 64-bit allocCache bitmap, if one is available.
+ var nextFreeFastResult gclinkptr
+ if span.allocCache != 0 {
+ theBit := sys.TrailingZeros64(span.allocCache)
+ result := span.freeindex + uint16(theBit)
+ if result < span.nelems {
+ freeidx := result + 1
+ // Decline the fast path when freeindex would cross a 64-slot
+ // boundary mid-span (the cache would need a refill).
+ if !(freeidx%64 == 0 && freeidx != span.nelems) {
+ span.allocCache >>= uint(theBit + 1)
+ span.freeindex = freeidx
+ span.allocCount++
+ nextFreeFastResult = gclinkptr(uintptr(result)*
+ 144 +
+ span.base())
+ }
+ }
+ }
+ v := nextFreeFastResult
+ if v == 0 {
+ // Slow path: refill the cache, possibly acquiring a new span.
+ v, span, checkGCTrigger = c.nextFree(spc)
+ }
+ x := unsafe.Pointer(v)
+ if span.needzero != 0 {
+ memclrNoHeapPointers(x, elemsize)
+ }
+ if goarch.PtrSize == 8 && sizeclass == 1 {
+
+ // Statically dead here (sizeclass == 11); kept by the generator.
+ c.scanAlloc += 8
+ } else {
+ // Inlined writeHeapBitsSmall: record typ's pointer bitmap for
+ // this object in the span's heap bits.
+ dataSize := size
+ x := uintptr(x) // shadow: bitmap arithmetic below wants a uintptr
+
+ if doubleCheckHeapSetType && (!heapBitsInSpan(dataSize) || !heapBitsInSpan(144)) {
+ throw("tried to write heap bits, but no heap bits in span")
+ }
+
+ src0 := readUintptr(getGCMask(typ))
+
+ const elemsize = 144 // shadows the outer const; same value
+
+ scanSize := typ.PtrBytes
+ src := src0
+ if typ.Size_ == goarch.PtrSize {
+ // Single-word elements: every word of the object is a pointer.
+ src = (1 << (dataSize / goarch.PtrSize)) - 1
+ } else {
+
+ if doubleCheckHeapSetType && !asanenabled && dataSize%typ.Size_ != 0 {
+ throw("runtime: (*mspan).writeHeapBitsSmall: dataSize is not a multiple of typ.Size_")
+ }
+ // Replicate the one-element mask across every element in the
+ // allocation.
+ for i := typ.Size_; i < dataSize; i += typ.Size_ {
+ src |= src0 << (i / goarch.PtrSize)
+ scanSize += typ.Size_
+ }
+ }
+
+ // Locate the destination bit range in the span's heap bitmap.
+ dstBase, _ := spanHeapBitsRange(span.base(), pageSize, elemsize)
+ dst := unsafe.Pointer(dstBase)
+ o := (x - span.base()) / goarch.PtrSize
+ i := o / ptrBits
+ j := o % ptrBits
+ const bits uintptr = elemsize / goarch.PtrSize
+
+ const bitsIsPowerOfTwo = bits&(bits-1) == 0
+ if bits > ptrBits || (!bitsIsPowerOfTwo && j+bits > ptrBits) {
+
+ // The mask straddles two bitmap words; split the write.
+ bits0 := ptrBits - j
+ bits1 := bits - bits0
+ dst0 := (*uintptr)(add(dst, (i+0)*goarch.PtrSize))
+ dst1 := (*uintptr)(add(dst, (i+1)*goarch.PtrSize))
+ *dst0 = (*dst0)&(^uintptr(0)>>bits0) | (src << j)
+ *dst1 = (*dst1)&^((1<<bits1)-1) | (src >> bits0)
+ } else {
+
+ // The mask fits within a single bitmap word.
+ dst := (*uintptr)(add(dst, i*goarch.PtrSize))
+ *dst = (*dst)&^(((1<<(min(bits, ptrBits)))-1)<<j) | (src << j)
+ }
+
+ const doubleCheck = false
+ if doubleCheck {
+ writeHeapBitsDoubleCheck(span, x, dataSize, src, src0, i, j, bits, typ)
+ }
+ if doubleCheckHeapSetType {
+ doubleCheckHeapType(x, dataSize, typ, nil, span)
+ }
+ c.scanAlloc += scanSize
+ }
+
+ // Make the zeroing and heap-bit stores above visible before the
+ // pointer to the object can be published.
+ publicationBarrier()
+
+ if writeBarrier.enabled {
+
+ // Allocate black: mark the new object during the GC mark phase.
+ gcmarknewobject(span, uintptr(x))
+ } else {
+
+ span.freeIndexForScan = span.freeindex
+ }
+
+ // Heap-profiling sample accounting.
+ c.nextSample -= int64(elemsize)
+ if c.nextSample < 0 || MemProfileRate != c.memProfRate {
+ profilealloc(mp, x, elemsize)
+ }
+ mp.mallocing = 0
+ releasem(mp)
+
+ if checkGCTrigger {
+ if t := (gcTrigger{kind: gcTriggerHeap}); t.test() {
+ gcStart(t)
+ }
+ }
+ // Charge assist credit for the size-class rounding waste
+ // (elemsize - size) not covered by deductAssistCredit above.
+ if gcBlackenEnabled != 0 && elemsize != 0 {
+ if assistG := getg().m.curg; assistG != nil {
+ assistG.gcAssistBytes -= int64(elemsize - size)
+ }
+ }
+
+ if debug.malloc {
+ postMallocgcDebug(x, elemsize, typ)
+ }
+ return x
+}
+
+// mallocgcSmallScanNoHeaderSC12 allocates a small scannable
+// (pointer-bearing) object with no malloc header, specialized to size
+// class 12 (element size 160 bytes). Generated code: sizeclass and
+// elemsize are compile-time constants so the allocation fast path
+// folds away size computations. needzero is accepted for signature
+// uniformity with the other specializations; zeroing here is driven by
+// span.needzero. Returns a pointer to the new, zeroed, typed object.
+func mallocgcSmallScanNoHeaderSC12(size uintptr, typ *_type, needzero bool) unsafe.Pointer {
+ if doubleCheckMalloc {
+ if gcphase == _GCmarktermination {
+ throw("mallocgc called with gcphase == _GCmarktermination")
+ }
+ }
+
+ lockRankMayQueueFinalizer()
+
+ // Debug hooks may satisfy (and short-circuit) the allocation.
+ if debug.malloc {
+ if x := preMallocgcDebug(size, typ); x != nil {
+ return x
+ }
+ }
+
+ // Charge this allocation against the mutator's GC assist credit.
+ if gcBlackenEnabled != 0 {
+ deductAssistCredit(size)
+ }
+
+ const sizeclass = 12
+
+ const elemsize = 160
+
+ // Pin to the M so the mcache cannot change under us.
+ mp := acquirem()
+ if doubleCheckMalloc {
+ doubleCheckSmallScanNoHeader(size, typ, mp)
+ }
+ mp.mallocing = 1
+
+ checkGCTrigger := false
+ c := getMCache(mp)
+ // Scannable span class: (sizeclass<<1) with the noscan bit clear.
+ const spc = spanClass(sizeclass<<1) | spanClass(0)
+ span := c.alloc[spc]
+
+ // Inlined nextFreeFast: pop the next free slot out of the span's
+ // 64-bit allocCache bitmap, if one is available.
+ var nextFreeFastResult gclinkptr
+ if span.allocCache != 0 {
+ theBit := sys.TrailingZeros64(span.allocCache)
+ result := span.freeindex + uint16(theBit)
+ if result < span.nelems {
+ freeidx := result + 1
+ // Decline the fast path when freeindex would cross a 64-slot
+ // boundary mid-span (the cache would need a refill).
+ if !(freeidx%64 == 0 && freeidx != span.nelems) {
+ span.allocCache >>= uint(theBit + 1)
+ span.freeindex = freeidx
+ span.allocCount++
+ nextFreeFastResult = gclinkptr(uintptr(result)*
+ 160 +
+ span.base())
+ }
+ }
+ }
+ v := nextFreeFastResult
+ if v == 0 {
+ // Slow path: refill the cache, possibly acquiring a new span.
+ v, span, checkGCTrigger = c.nextFree(spc)
+ }
+ x := unsafe.Pointer(v)
+ if span.needzero != 0 {
+ memclrNoHeapPointers(x, elemsize)
+ }
+ if goarch.PtrSize == 8 && sizeclass == 1 {
+
+ // Statically dead here (sizeclass == 12); kept by the generator.
+ c.scanAlloc += 8
+ } else {
+ // Inlined writeHeapBitsSmall: record typ's pointer bitmap for
+ // this object in the span's heap bits.
+ dataSize := size
+ x := uintptr(x) // shadow: bitmap arithmetic below wants a uintptr
+
+ if doubleCheckHeapSetType && (!heapBitsInSpan(dataSize) || !heapBitsInSpan(160)) {
+ throw("tried to write heap bits, but no heap bits in span")
+ }
+
+ src0 := readUintptr(getGCMask(typ))
+
+ const elemsize = 160 // shadows the outer const; same value
+
+ scanSize := typ.PtrBytes
+ src := src0
+ if typ.Size_ == goarch.PtrSize {
+ // Single-word elements: every word of the object is a pointer.
+ src = (1 << (dataSize / goarch.PtrSize)) - 1
+ } else {
+
+ if doubleCheckHeapSetType && !asanenabled && dataSize%typ.Size_ != 0 {
+ throw("runtime: (*mspan).writeHeapBitsSmall: dataSize is not a multiple of typ.Size_")
+ }
+ // Replicate the one-element mask across every element in the
+ // allocation.
+ for i := typ.Size_; i < dataSize; i += typ.Size_ {
+ src |= src0 << (i / goarch.PtrSize)
+ scanSize += typ.Size_
+ }
+ }
+
+ // Locate the destination bit range in the span's heap bitmap.
+ dstBase, _ := spanHeapBitsRange(span.base(), pageSize, elemsize)
+ dst := unsafe.Pointer(dstBase)
+ o := (x - span.base()) / goarch.PtrSize
+ i := o / ptrBits
+ j := o % ptrBits
+ const bits uintptr = elemsize / goarch.PtrSize
+
+ const bitsIsPowerOfTwo = bits&(bits-1) == 0
+ if bits > ptrBits || (!bitsIsPowerOfTwo && j+bits > ptrBits) {
+
+ // The mask straddles two bitmap words; split the write.
+ bits0 := ptrBits - j
+ bits1 := bits - bits0
+ dst0 := (*uintptr)(add(dst, (i+0)*goarch.PtrSize))
+ dst1 := (*uintptr)(add(dst, (i+1)*goarch.PtrSize))
+ *dst0 = (*dst0)&(^uintptr(0)>>bits0) | (src << j)
+ *dst1 = (*dst1)&^((1<<bits1)-1) | (src >> bits0)
+ } else {
+
+ // The mask fits within a single bitmap word.
+ dst := (*uintptr)(add(dst, i*goarch.PtrSize))
+ *dst = (*dst)&^(((1<<(min(bits, ptrBits)))-1)<<j) | (src << j)
+ }
+
+ const doubleCheck = false
+ if doubleCheck {
+ writeHeapBitsDoubleCheck(span, x, dataSize, src, src0, i, j, bits, typ)
+ }
+ if doubleCheckHeapSetType {
+ doubleCheckHeapType(x, dataSize, typ, nil, span)
+ }
+ c.scanAlloc += scanSize
+ }
+
+ // Make the zeroing and heap-bit stores above visible before the
+ // pointer to the object can be published.
+ publicationBarrier()
+
+ if writeBarrier.enabled {
+
+ // Allocate black: mark the new object during the GC mark phase.
+ gcmarknewobject(span, uintptr(x))
+ } else {
+
+ span.freeIndexForScan = span.freeindex
+ }
+
+ // Heap-profiling sample accounting.
+ c.nextSample -= int64(elemsize)
+ if c.nextSample < 0 || MemProfileRate != c.memProfRate {
+ profilealloc(mp, x, elemsize)
+ }
+ mp.mallocing = 0
+ releasem(mp)
+
+ if checkGCTrigger {
+ if t := (gcTrigger{kind: gcTriggerHeap}); t.test() {
+ gcStart(t)
+ }
+ }
+ // Charge assist credit for the size-class rounding waste
+ // (elemsize - size) not covered by deductAssistCredit above.
+ if gcBlackenEnabled != 0 && elemsize != 0 {
+ if assistG := getg().m.curg; assistG != nil {
+ assistG.gcAssistBytes -= int64(elemsize - size)
+ }
+ }
+
+ if debug.malloc {
+ postMallocgcDebug(x, elemsize, typ)
+ }
+ return x
+}
+
+// mallocgcSmallScanNoHeaderSC13 allocates a small scannable
+// (pointer-bearing) object with no malloc header, specialized to size
+// class 13 (element size 176 bytes). Generated code: sizeclass and
+// elemsize are compile-time constants so the allocation fast path
+// folds away size computations. needzero is accepted for signature
+// uniformity with the other specializations; zeroing here is driven by
+// span.needzero. Returns a pointer to the new, zeroed, typed object.
+func mallocgcSmallScanNoHeaderSC13(size uintptr, typ *_type, needzero bool) unsafe.Pointer {
+ if doubleCheckMalloc {
+ if gcphase == _GCmarktermination {
+ throw("mallocgc called with gcphase == _GCmarktermination")
+ }
+ }
+
+ lockRankMayQueueFinalizer()
+
+ // Debug hooks may satisfy (and short-circuit) the allocation.
+ if debug.malloc {
+ if x := preMallocgcDebug(size, typ); x != nil {
+ return x
+ }
+ }
+
+ // Charge this allocation against the mutator's GC assist credit.
+ if gcBlackenEnabled != 0 {
+ deductAssistCredit(size)
+ }
+
+ const sizeclass = 13
+
+ const elemsize = 176
+
+ // Pin to the M so the mcache cannot change under us.
+ mp := acquirem()
+ if doubleCheckMalloc {
+ doubleCheckSmallScanNoHeader(size, typ, mp)
+ }
+ mp.mallocing = 1
+
+ checkGCTrigger := false
+ c := getMCache(mp)
+ // Scannable span class: (sizeclass<<1) with the noscan bit clear.
+ const spc = spanClass(sizeclass<<1) | spanClass(0)
+ span := c.alloc[spc]
+
+ // Inlined nextFreeFast: pop the next free slot out of the span's
+ // 64-bit allocCache bitmap, if one is available.
+ var nextFreeFastResult gclinkptr
+ if span.allocCache != 0 {
+ theBit := sys.TrailingZeros64(span.allocCache)
+ result := span.freeindex + uint16(theBit)
+ if result < span.nelems {
+ freeidx := result + 1
+ // Decline the fast path when freeindex would cross a 64-slot
+ // boundary mid-span (the cache would need a refill).
+ if !(freeidx%64 == 0 && freeidx != span.nelems) {
+ span.allocCache >>= uint(theBit + 1)
+ span.freeindex = freeidx
+ span.allocCount++
+ nextFreeFastResult = gclinkptr(uintptr(result)*
+ 176 +
+ span.base())
+ }
+ }
+ }
+ v := nextFreeFastResult
+ if v == 0 {
+ // Slow path: refill the cache, possibly acquiring a new span.
+ v, span, checkGCTrigger = c.nextFree(spc)
+ }
+ x := unsafe.Pointer(v)
+ if span.needzero != 0 {
+ memclrNoHeapPointers(x, elemsize)
+ }
+ if goarch.PtrSize == 8 && sizeclass == 1 {
+
+ // Statically dead here (sizeclass == 13); kept by the generator.
+ c.scanAlloc += 8
+ } else {
+ // Inlined writeHeapBitsSmall: record typ's pointer bitmap for
+ // this object in the span's heap bits.
+ dataSize := size
+ x := uintptr(x) // shadow: bitmap arithmetic below wants a uintptr
+
+ if doubleCheckHeapSetType && (!heapBitsInSpan(dataSize) || !heapBitsInSpan(176)) {
+ throw("tried to write heap bits, but no heap bits in span")
+ }
+
+ src0 := readUintptr(getGCMask(typ))
+
+ const elemsize = 176 // shadows the outer const; same value
+
+ scanSize := typ.PtrBytes
+ src := src0
+ if typ.Size_ == goarch.PtrSize {
+ // Single-word elements: every word of the object is a pointer.
+ src = (1 << (dataSize / goarch.PtrSize)) - 1
+ } else {
+
+ if doubleCheckHeapSetType && !asanenabled && dataSize%typ.Size_ != 0 {
+ throw("runtime: (*mspan).writeHeapBitsSmall: dataSize is not a multiple of typ.Size_")
+ }
+ // Replicate the one-element mask across every element in the
+ // allocation.
+ for i := typ.Size_; i < dataSize; i += typ.Size_ {
+ src |= src0 << (i / goarch.PtrSize)
+ scanSize += typ.Size_
+ }
+ }
+
+ // Locate the destination bit range in the span's heap bitmap.
+ dstBase, _ := spanHeapBitsRange(span.base(), pageSize, elemsize)
+ dst := unsafe.Pointer(dstBase)
+ o := (x - span.base()) / goarch.PtrSize
+ i := o / ptrBits
+ j := o % ptrBits
+ const bits uintptr = elemsize / goarch.PtrSize
+
+ const bitsIsPowerOfTwo = bits&(bits-1) == 0
+ if bits > ptrBits || (!bitsIsPowerOfTwo && j+bits > ptrBits) {
+
+ // The mask straddles two bitmap words; split the write.
+ bits0 := ptrBits - j
+ bits1 := bits - bits0
+ dst0 := (*uintptr)(add(dst, (i+0)*goarch.PtrSize))
+ dst1 := (*uintptr)(add(dst, (i+1)*goarch.PtrSize))
+ *dst0 = (*dst0)&(^uintptr(0)>>bits0) | (src << j)
+ *dst1 = (*dst1)&^((1<<bits1)-1) | (src >> bits0)
+ } else {
+
+ // The mask fits within a single bitmap word.
+ dst := (*uintptr)(add(dst, i*goarch.PtrSize))
+ *dst = (*dst)&^(((1<<(min(bits, ptrBits)))-1)<<j) | (src << j)
+ }
+
+ const doubleCheck = false
+ if doubleCheck {
+ writeHeapBitsDoubleCheck(span, x, dataSize, src, src0, i, j, bits, typ)
+ }
+ if doubleCheckHeapSetType {
+ doubleCheckHeapType(x, dataSize, typ, nil, span)
+ }
+ c.scanAlloc += scanSize
+ }
+
+ // Make the zeroing and heap-bit stores above visible before the
+ // pointer to the object can be published.
+ publicationBarrier()
+
+ if writeBarrier.enabled {
+
+ // Allocate black: mark the new object during the GC mark phase.
+ gcmarknewobject(span, uintptr(x))
+ } else {
+
+ span.freeIndexForScan = span.freeindex
+ }
+
+ // Heap-profiling sample accounting.
+ c.nextSample -= int64(elemsize)
+ if c.nextSample < 0 || MemProfileRate != c.memProfRate {
+ profilealloc(mp, x, elemsize)
+ }
+ mp.mallocing = 0
+ releasem(mp)
+
+ if checkGCTrigger {
+ if t := (gcTrigger{kind: gcTriggerHeap}); t.test() {
+ gcStart(t)
+ }
+ }
+ // Charge assist credit for the size-class rounding waste
+ // (elemsize - size) not covered by deductAssistCredit above.
+ if gcBlackenEnabled != 0 && elemsize != 0 {
+ if assistG := getg().m.curg; assistG != nil {
+ assistG.gcAssistBytes -= int64(elemsize - size)
+ }
+ }
+
+ if debug.malloc {
+ postMallocgcDebug(x, elemsize, typ)
+ }
+ return x
+}
+
+// mallocgcSmallScanNoHeaderSC14 allocates a small scannable
+// (pointer-bearing) object with no malloc header, specialized to size
+// class 14 (element size 192 bytes). Generated code: sizeclass and
+// elemsize are compile-time constants so the allocation fast path
+// folds away size computations. needzero is accepted for signature
+// uniformity with the other specializations; zeroing here is driven by
+// span.needzero. Returns a pointer to the new, zeroed, typed object.
+func mallocgcSmallScanNoHeaderSC14(size uintptr, typ *_type, needzero bool) unsafe.Pointer {
+ if doubleCheckMalloc {
+ if gcphase == _GCmarktermination {
+ throw("mallocgc called with gcphase == _GCmarktermination")
+ }
+ }
+
+ lockRankMayQueueFinalizer()
+
+ // Debug hooks may satisfy (and short-circuit) the allocation.
+ if debug.malloc {
+ if x := preMallocgcDebug(size, typ); x != nil {
+ return x
+ }
+ }
+
+ // Charge this allocation against the mutator's GC assist credit.
+ if gcBlackenEnabled != 0 {
+ deductAssistCredit(size)
+ }
+
+ const sizeclass = 14
+
+ const elemsize = 192
+
+ // Pin to the M so the mcache cannot change under us.
+ mp := acquirem()
+ if doubleCheckMalloc {
+ doubleCheckSmallScanNoHeader(size, typ, mp)
+ }
+ mp.mallocing = 1
+
+ checkGCTrigger := false
+ c := getMCache(mp)
+ // Scannable span class: (sizeclass<<1) with the noscan bit clear.
+ const spc = spanClass(sizeclass<<1) | spanClass(0)
+ span := c.alloc[spc]
+
+ // Inlined nextFreeFast: pop the next free slot out of the span's
+ // 64-bit allocCache bitmap, if one is available.
+ var nextFreeFastResult gclinkptr
+ if span.allocCache != 0 {
+ theBit := sys.TrailingZeros64(span.allocCache)
+ result := span.freeindex + uint16(theBit)
+ if result < span.nelems {
+ freeidx := result + 1
+ // Decline the fast path when freeindex would cross a 64-slot
+ // boundary mid-span (the cache would need a refill).
+ if !(freeidx%64 == 0 && freeidx != span.nelems) {
+ span.allocCache >>= uint(theBit + 1)
+ span.freeindex = freeidx
+ span.allocCount++
+ nextFreeFastResult = gclinkptr(uintptr(result)*
+ 192 +
+ span.base())
+ }
+ }
+ }
+ v := nextFreeFastResult
+ if v == 0 {
+ // Slow path: refill the cache, possibly acquiring a new span.
+ v, span, checkGCTrigger = c.nextFree(spc)
+ }
+ x := unsafe.Pointer(v)
+ if span.needzero != 0 {
+ memclrNoHeapPointers(x, elemsize)
+ }
+ if goarch.PtrSize == 8 && sizeclass == 1 {
+
+ // Statically dead here (sizeclass == 14); kept by the generator.
+ c.scanAlloc += 8
+ } else {
+ // Inlined writeHeapBitsSmall: record typ's pointer bitmap for
+ // this object in the span's heap bits.
+ dataSize := size
+ x := uintptr(x) // shadow: bitmap arithmetic below wants a uintptr
+
+ if doubleCheckHeapSetType && (!heapBitsInSpan(dataSize) || !heapBitsInSpan(192)) {
+ throw("tried to write heap bits, but no heap bits in span")
+ }
+
+ src0 := readUintptr(getGCMask(typ))
+
+ const elemsize = 192 // shadows the outer const; same value
+
+ scanSize := typ.PtrBytes
+ src := src0
+ if typ.Size_ == goarch.PtrSize {
+ // Single-word elements: every word of the object is a pointer.
+ src = (1 << (dataSize / goarch.PtrSize)) - 1
+ } else {
+
+ if doubleCheckHeapSetType && !asanenabled && dataSize%typ.Size_ != 0 {
+ throw("runtime: (*mspan).writeHeapBitsSmall: dataSize is not a multiple of typ.Size_")
+ }
+ // Replicate the one-element mask across every element in the
+ // allocation.
+ for i := typ.Size_; i < dataSize; i += typ.Size_ {
+ src |= src0 << (i / goarch.PtrSize)
+ scanSize += typ.Size_
+ }
+ }
+
+ // Locate the destination bit range in the span's heap bitmap.
+ dstBase, _ := spanHeapBitsRange(span.base(), pageSize, elemsize)
+ dst := unsafe.Pointer(dstBase)
+ o := (x - span.base()) / goarch.PtrSize
+ i := o / ptrBits
+ j := o % ptrBits
+ const bits uintptr = elemsize / goarch.PtrSize
+
+ const bitsIsPowerOfTwo = bits&(bits-1) == 0
+ if bits > ptrBits || (!bitsIsPowerOfTwo && j+bits > ptrBits) {
+
+ // The mask straddles two bitmap words; split the write.
+ bits0 := ptrBits - j
+ bits1 := bits - bits0
+ dst0 := (*uintptr)(add(dst, (i+0)*goarch.PtrSize))
+ dst1 := (*uintptr)(add(dst, (i+1)*goarch.PtrSize))
+ *dst0 = (*dst0)&(^uintptr(0)>>bits0) | (src << j)
+ *dst1 = (*dst1)&^((1<<bits1)-1) | (src >> bits0)
+ } else {
+
+ // The mask fits within a single bitmap word.
+ dst := (*uintptr)(add(dst, i*goarch.PtrSize))
+ *dst = (*dst)&^(((1<<(min(bits, ptrBits)))-1)<<j) | (src << j)
+ }
+
+ const doubleCheck = false
+ if doubleCheck {
+ writeHeapBitsDoubleCheck(span, x, dataSize, src, src0, i, j, bits, typ)
+ }
+ if doubleCheckHeapSetType {
+ doubleCheckHeapType(x, dataSize, typ, nil, span)
+ }
+ c.scanAlloc += scanSize
+ }
+
+ // Make the zeroing and heap-bit stores above visible before the
+ // pointer to the object can be published.
+ publicationBarrier()
+
+ if writeBarrier.enabled {
+
+ // Allocate black: mark the new object during the GC mark phase.
+ gcmarknewobject(span, uintptr(x))
+ } else {
+
+ span.freeIndexForScan = span.freeindex
+ }
+
+ // Heap-profiling sample accounting.
+ c.nextSample -= int64(elemsize)
+ if c.nextSample < 0 || MemProfileRate != c.memProfRate {
+ profilealloc(mp, x, elemsize)
+ }
+ mp.mallocing = 0
+ releasem(mp)
+
+ if checkGCTrigger {
+ if t := (gcTrigger{kind: gcTriggerHeap}); t.test() {
+ gcStart(t)
+ }
+ }
+ // Charge assist credit for the size-class rounding waste
+ // (elemsize - size) not covered by deductAssistCredit above.
+ if gcBlackenEnabled != 0 && elemsize != 0 {
+ if assistG := getg().m.curg; assistG != nil {
+ assistG.gcAssistBytes -= int64(elemsize - size)
+ }
+ }
+
+ if debug.malloc {
+ postMallocgcDebug(x, elemsize, typ)
+ }
+ return x
+}
+
+// mallocgcSmallScanNoHeaderSC15 allocates a small scannable
+// (pointer-bearing) object with no malloc header, specialized to size
+// class 15 (element size 208 bytes). Generated code: sizeclass and
+// elemsize are compile-time constants so the allocation fast path
+// folds away size computations. needzero is accepted for signature
+// uniformity with the other specializations; zeroing here is driven by
+// span.needzero. Returns a pointer to the new, zeroed, typed object.
+func mallocgcSmallScanNoHeaderSC15(size uintptr, typ *_type, needzero bool) unsafe.Pointer {
+ if doubleCheckMalloc {
+ if gcphase == _GCmarktermination {
+ throw("mallocgc called with gcphase == _GCmarktermination")
+ }
+ }
+
+ lockRankMayQueueFinalizer()
+
+ // Debug hooks may satisfy (and short-circuit) the allocation.
+ if debug.malloc {
+ if x := preMallocgcDebug(size, typ); x != nil {
+ return x
+ }
+ }
+
+ // Charge this allocation against the mutator's GC assist credit.
+ if gcBlackenEnabled != 0 {
+ deductAssistCredit(size)
+ }
+
+ const sizeclass = 15
+
+ const elemsize = 208
+
+ // Pin to the M so the mcache cannot change under us.
+ mp := acquirem()
+ if doubleCheckMalloc {
+ doubleCheckSmallScanNoHeader(size, typ, mp)
+ }
+ mp.mallocing = 1
+
+ checkGCTrigger := false
+ c := getMCache(mp)
+ // Scannable span class: (sizeclass<<1) with the noscan bit clear.
+ const spc = spanClass(sizeclass<<1) | spanClass(0)
+ span := c.alloc[spc]
+
+ // Inlined nextFreeFast: pop the next free slot out of the span's
+ // 64-bit allocCache bitmap, if one is available.
+ var nextFreeFastResult gclinkptr
+ if span.allocCache != 0 {
+ theBit := sys.TrailingZeros64(span.allocCache)
+ result := span.freeindex + uint16(theBit)
+ if result < span.nelems {
+ freeidx := result + 1
+ // Decline the fast path when freeindex would cross a 64-slot
+ // boundary mid-span (the cache would need a refill).
+ if !(freeidx%64 == 0 && freeidx != span.nelems) {
+ span.allocCache >>= uint(theBit + 1)
+ span.freeindex = freeidx
+ span.allocCount++
+ nextFreeFastResult = gclinkptr(uintptr(result)*
+ 208 +
+ span.base())
+ }
+ }
+ }
+ v := nextFreeFastResult
+ if v == 0 {
+ // Slow path: refill the cache, possibly acquiring a new span.
+ v, span, checkGCTrigger = c.nextFree(spc)
+ }
+ x := unsafe.Pointer(v)
+ if span.needzero != 0 {
+ memclrNoHeapPointers(x, elemsize)
+ }
+ if goarch.PtrSize == 8 && sizeclass == 1 {
+
+ // Statically dead here (sizeclass == 15); kept by the generator.
+ c.scanAlloc += 8
+ } else {
+ // Inlined writeHeapBitsSmall: record typ's pointer bitmap for
+ // this object in the span's heap bits.
+ dataSize := size
+ x := uintptr(x) // shadow: bitmap arithmetic below wants a uintptr
+
+ if doubleCheckHeapSetType && (!heapBitsInSpan(dataSize) || !heapBitsInSpan(208)) {
+ throw("tried to write heap bits, but no heap bits in span")
+ }
+
+ src0 := readUintptr(getGCMask(typ))
+
+ const elemsize = 208 // shadows the outer const; same value
+
+ scanSize := typ.PtrBytes
+ src := src0
+ if typ.Size_ == goarch.PtrSize {
+ // Single-word elements: every word of the object is a pointer.
+ src = (1 << (dataSize / goarch.PtrSize)) - 1
+ } else {
+
+ if doubleCheckHeapSetType && !asanenabled && dataSize%typ.Size_ != 0 {
+ throw("runtime: (*mspan).writeHeapBitsSmall: dataSize is not a multiple of typ.Size_")
+ }
+ // Replicate the one-element mask across every element in the
+ // allocation.
+ for i := typ.Size_; i < dataSize; i += typ.Size_ {
+ src |= src0 << (i / goarch.PtrSize)
+ scanSize += typ.Size_
+ }
+ }
+
+ // Locate the destination bit range in the span's heap bitmap.
+ dstBase, _ := spanHeapBitsRange(span.base(), pageSize, elemsize)
+ dst := unsafe.Pointer(dstBase)
+ o := (x - span.base()) / goarch.PtrSize
+ i := o / ptrBits
+ j := o % ptrBits
+ const bits uintptr = elemsize / goarch.PtrSize
+
+ const bitsIsPowerOfTwo = bits&(bits-1) == 0
+ if bits > ptrBits || (!bitsIsPowerOfTwo && j+bits > ptrBits) {
+
+ // The mask straddles two bitmap words; split the write.
+ bits0 := ptrBits - j
+ bits1 := bits - bits0
+ dst0 := (*uintptr)(add(dst, (i+0)*goarch.PtrSize))
+ dst1 := (*uintptr)(add(dst, (i+1)*goarch.PtrSize))
+ *dst0 = (*dst0)&(^uintptr(0)>>bits0) | (src << j)
+ *dst1 = (*dst1)&^((1<<bits1)-1) | (src >> bits0)
+ } else {
+
+ // The mask fits within a single bitmap word.
+ dst := (*uintptr)(add(dst, i*goarch.PtrSize))
+ *dst = (*dst)&^(((1<<(min(bits, ptrBits)))-1)<<j) | (src << j)
+ }
+
+ const doubleCheck = false
+ if doubleCheck {
+ writeHeapBitsDoubleCheck(span, x, dataSize, src, src0, i, j, bits, typ)
+ }
+ if doubleCheckHeapSetType {
+ doubleCheckHeapType(x, dataSize, typ, nil, span)
+ }
+ c.scanAlloc += scanSize
+ }
+
+ // Make the zeroing and heap-bit stores above visible before the
+ // pointer to the object can be published.
+ publicationBarrier()
+
+ if writeBarrier.enabled {
+
+ // Allocate black: mark the new object during the GC mark phase.
+ gcmarknewobject(span, uintptr(x))
+ } else {
+
+ span.freeIndexForScan = span.freeindex
+ }
+
+ // Heap-profiling sample accounting.
+ c.nextSample -= int64(elemsize)
+ if c.nextSample < 0 || MemProfileRate != c.memProfRate {
+ profilealloc(mp, x, elemsize)
+ }
+ mp.mallocing = 0
+ releasem(mp)
+
+ if checkGCTrigger {
+ if t := (gcTrigger{kind: gcTriggerHeap}); t.test() {
+ gcStart(t)
+ }
+ }
+ // Charge assist credit for the size-class rounding waste
+ // (elemsize - size) not covered by deductAssistCredit above.
+ if gcBlackenEnabled != 0 && elemsize != 0 {
+ if assistG := getg().m.curg; assistG != nil {
+ assistG.gcAssistBytes -= int64(elemsize - size)
+ }
+ }
+
+ if debug.malloc {
+ postMallocgcDebug(x, elemsize, typ)
+ }
+ return x
+}
+
+// mallocgcSmallScanNoHeaderSC16 allocates a small scannable
+// (pointer-bearing) object with no malloc header, specialized to size
+// class 16 (element size 224 bytes). Generated code: sizeclass and
+// elemsize are compile-time constants so the allocation fast path
+// folds away size computations. needzero is accepted for signature
+// uniformity with the other specializations; zeroing here is driven by
+// span.needzero. Returns a pointer to the new, zeroed, typed object.
+func mallocgcSmallScanNoHeaderSC16(size uintptr, typ *_type, needzero bool) unsafe.Pointer {
+ if doubleCheckMalloc {
+ if gcphase == _GCmarktermination {
+ throw("mallocgc called with gcphase == _GCmarktermination")
+ }
+ }
+
+ lockRankMayQueueFinalizer()
+
+ // Debug hooks may satisfy (and short-circuit) the allocation.
+ if debug.malloc {
+ if x := preMallocgcDebug(size, typ); x != nil {
+ return x
+ }
+ }
+
+ // Charge this allocation against the mutator's GC assist credit.
+ if gcBlackenEnabled != 0 {
+ deductAssistCredit(size)
+ }
+
+ const sizeclass = 16
+
+ const elemsize = 224
+
+ // Pin to the M so the mcache cannot change under us.
+ mp := acquirem()
+ if doubleCheckMalloc {
+ doubleCheckSmallScanNoHeader(size, typ, mp)
+ }
+ mp.mallocing = 1
+
+ checkGCTrigger := false
+ c := getMCache(mp)
+ // Scannable span class: (sizeclass<<1) with the noscan bit clear.
+ const spc = spanClass(sizeclass<<1) | spanClass(0)
+ span := c.alloc[spc]
+
+ // Inlined nextFreeFast: pop the next free slot out of the span's
+ // 64-bit allocCache bitmap, if one is available.
+ var nextFreeFastResult gclinkptr
+ if span.allocCache != 0 {
+ theBit := sys.TrailingZeros64(span.allocCache)
+ result := span.freeindex + uint16(theBit)
+ if result < span.nelems {
+ freeidx := result + 1
+ // Decline the fast path when freeindex would cross a 64-slot
+ // boundary mid-span (the cache would need a refill).
+ if !(freeidx%64 == 0 && freeidx != span.nelems) {
+ span.allocCache >>= uint(theBit + 1)
+ span.freeindex = freeidx
+ span.allocCount++
+ nextFreeFastResult = gclinkptr(uintptr(result)*
+ 224 +
+ span.base())
+ }
+ }
+ }
+ v := nextFreeFastResult
+ if v == 0 {
+ // Slow path: refill the cache, possibly acquiring a new span.
+ v, span, checkGCTrigger = c.nextFree(spc)
+ }
+ x := unsafe.Pointer(v)
+ if span.needzero != 0 {
+ memclrNoHeapPointers(x, elemsize)
+ }
+ if goarch.PtrSize == 8 && sizeclass == 1 {
+
+ // Statically dead here (sizeclass == 16); kept by the generator.
+ c.scanAlloc += 8
+ } else {
+ // Inlined writeHeapBitsSmall: record typ's pointer bitmap for
+ // this object in the span's heap bits.
+ dataSize := size
+ x := uintptr(x) // shadow: bitmap arithmetic below wants a uintptr
+
+ if doubleCheckHeapSetType && (!heapBitsInSpan(dataSize) || !heapBitsInSpan(224)) {
+ throw("tried to write heap bits, but no heap bits in span")
+ }
+
+ src0 := readUintptr(getGCMask(typ))
+
+ const elemsize = 224 // shadows the outer const; same value
+
+ scanSize := typ.PtrBytes
+ src := src0
+ if typ.Size_ == goarch.PtrSize {
+ // Single-word elements: every word of the object is a pointer.
+ src = (1 << (dataSize / goarch.PtrSize)) - 1
+ } else {
+
+ if doubleCheckHeapSetType && !asanenabled && dataSize%typ.Size_ != 0 {
+ throw("runtime: (*mspan).writeHeapBitsSmall: dataSize is not a multiple of typ.Size_")
+ }
+ // Replicate the one-element mask across every element in the
+ // allocation.
+ for i := typ.Size_; i < dataSize; i += typ.Size_ {
+ src |= src0 << (i / goarch.PtrSize)
+ scanSize += typ.Size_
+ }
+ }
+
+ // Locate the destination bit range in the span's heap bitmap.
+ dstBase, _ := spanHeapBitsRange(span.base(), pageSize, elemsize)
+ dst := unsafe.Pointer(dstBase)
+ o := (x - span.base()) / goarch.PtrSize
+ i := o / ptrBits
+ j := o % ptrBits
+ const bits uintptr = elemsize / goarch.PtrSize
+
+ const bitsIsPowerOfTwo = bits&(bits-1) == 0
+ if bits > ptrBits || (!bitsIsPowerOfTwo && j+bits > ptrBits) {
+
+ // The mask straddles two bitmap words; split the write.
+ bits0 := ptrBits - j
+ bits1 := bits - bits0
+ dst0 := (*uintptr)(add(dst, (i+0)*goarch.PtrSize))
+ dst1 := (*uintptr)(add(dst, (i+1)*goarch.PtrSize))
+ *dst0 = (*dst0)&(^uintptr(0)>>bits0) | (src << j)
+ *dst1 = (*dst1)&^((1<<bits1)-1) | (src >> bits0)
+ } else {
+
+ // The mask fits within a single bitmap word.
+ dst := (*uintptr)(add(dst, i*goarch.PtrSize))
+ *dst = (*dst)&^(((1<<(min(bits, ptrBits)))-1)<<j) | (src << j)
+ }
+
+ const doubleCheck = false
+ if doubleCheck {
+ writeHeapBitsDoubleCheck(span, x, dataSize, src, src0, i, j, bits, typ)
+ }
+ if doubleCheckHeapSetType {
+ doubleCheckHeapType(x, dataSize, typ, nil, span)
+ }
+ c.scanAlloc += scanSize
+ }
+
+ // Make the zeroing and heap-bit stores above visible before the
+ // pointer to the object can be published.
+ publicationBarrier()
+
+ if writeBarrier.enabled {
+
+ // Allocate black: mark the new object during the GC mark phase.
+ gcmarknewobject(span, uintptr(x))
+ } else {
+
+ span.freeIndexForScan = span.freeindex
+ }
+
+ // Heap-profiling sample accounting.
+ c.nextSample -= int64(elemsize)
+ if c.nextSample < 0 || MemProfileRate != c.memProfRate {
+ profilealloc(mp, x, elemsize)
+ }
+ mp.mallocing = 0
+ releasem(mp)
+
+ if checkGCTrigger {
+ if t := (gcTrigger{kind: gcTriggerHeap}); t.test() {
+ gcStart(t)
+ }
+ }
+ // Charge assist credit for the size-class rounding waste
+ // (elemsize - size) not covered by deductAssistCredit above.
+ if gcBlackenEnabled != 0 && elemsize != 0 {
+ if assistG := getg().m.curg; assistG != nil {
+ assistG.gcAssistBytes -= int64(elemsize - size)
+ }
+ }
+
+ if debug.malloc {
+ postMallocgcDebug(x, elemsize, typ)
+ }
+ return x
+}
+
+func mallocgcSmallScanNoHeaderSC17(size uintptr, typ *_type, needzero bool) unsafe.Pointer {
+ if doubleCheckMalloc {
+ if gcphase == _GCmarktermination {
+ throw("mallocgc called with gcphase == _GCmarktermination")
+ }
+ }
+
+ lockRankMayQueueFinalizer()
+
+ if debug.malloc {
+ if x := preMallocgcDebug(size, typ); x != nil {
+ return x
+ }
+ }
+
+ if gcBlackenEnabled != 0 {
+ deductAssistCredit(size)
+ }
+
+ const sizeclass = 17
+
+ const elemsize = 240
+
+ mp := acquirem()
+ if doubleCheckMalloc {
+ doubleCheckSmallScanNoHeader(size, typ, mp)
+ }
+ mp.mallocing = 1
+
+ checkGCTrigger := false
+ c := getMCache(mp)
+ const spc = spanClass(sizeclass<<1) | spanClass(0)
+ span := c.alloc[spc]
+
+ var nextFreeFastResult gclinkptr
+ if span.allocCache != 0 {
+ theBit := sys.TrailingZeros64(span.allocCache)
+ result := span.freeindex + uint16(theBit)
+ if result < span.nelems {
+ freeidx := result + 1
+ if !(freeidx%64 == 0 && freeidx != span.nelems) {
+ span.allocCache >>= uint(theBit + 1)
+ span.freeindex = freeidx
+ span.allocCount++
+ nextFreeFastResult = gclinkptr(uintptr(result)*
+ 240 +
+ span.base())
+ }
+ }
+ }
+ v := nextFreeFastResult
+ if v == 0 {
+ v, span, checkGCTrigger = c.nextFree(spc)
+ }
+ x := unsafe.Pointer(v)
+ if span.needzero != 0 {
+ memclrNoHeapPointers(x, elemsize)
+ }
+ if goarch.PtrSize == 8 && sizeclass == 1 {
+
+ c.scanAlloc += 8
+ } else {
+ dataSize := size
+ x := uintptr(x)
+
+ if doubleCheckHeapSetType && (!heapBitsInSpan(dataSize) || !heapBitsInSpan(240)) {
+ throw("tried to write heap bits, but no heap bits in span")
+ }
+
+ src0 := readUintptr(getGCMask(typ))
+
+ const elemsize = 240
+
+ scanSize := typ.PtrBytes
+ src := src0
+ if typ.Size_ == goarch.PtrSize {
+ src = (1 << (dataSize / goarch.PtrSize)) - 1
+ } else {
+
+ if doubleCheckHeapSetType && !asanenabled && dataSize%typ.Size_ != 0 {
+ throw("runtime: (*mspan).writeHeapBitsSmall: dataSize is not a multiple of typ.Size_")
+ }
+ for i := typ.Size_; i < dataSize; i += typ.Size_ {
+ src |= src0 << (i / goarch.PtrSize)
+ scanSize += typ.Size_
+ }
+ }
+
+ dstBase, _ := spanHeapBitsRange(span.base(), pageSize, elemsize)
+ dst := unsafe.Pointer(dstBase)
+ o := (x - span.base()) / goarch.PtrSize
+ i := o / ptrBits
+ j := o % ptrBits
+ const bits uintptr = elemsize / goarch.PtrSize
+
+ const bitsIsPowerOfTwo = bits&(bits-1) == 0
+ if bits > ptrBits || (!bitsIsPowerOfTwo && j+bits > ptrBits) {
+
+ bits0 := ptrBits - j
+ bits1 := bits - bits0
+ dst0 := (*uintptr)(add(dst, (i+0)*goarch.PtrSize))
+ dst1 := (*uintptr)(add(dst, (i+1)*goarch.PtrSize))
+ *dst0 = (*dst0)&(^uintptr(0)>>bits0) | (src << j)
+ *dst1 = (*dst1)&^((1<<bits1)-1) | (src >> bits0)
+ } else {
+
+ dst := (*uintptr)(add(dst, i*goarch.PtrSize))
+ *dst = (*dst)&^(((1<<(min(bits, ptrBits)))-1)<<j) | (src << j)
+ }
+
+ const doubleCheck = false
+ if doubleCheck {
+ writeHeapBitsDoubleCheck(span, x, dataSize, src, src0, i, j, bits, typ)
+ }
+ if doubleCheckHeapSetType {
+ doubleCheckHeapType(x, dataSize, typ, nil, span)
+ }
+ c.scanAlloc += scanSize
+ }
+
+ publicationBarrier()
+
+ if writeBarrier.enabled {
+
+ gcmarknewobject(span, uintptr(x))
+ } else {
+
+ span.freeIndexForScan = span.freeindex
+ }
+
+ c.nextSample -= int64(elemsize)
+ if c.nextSample < 0 || MemProfileRate != c.memProfRate {
+ profilealloc(mp, x, elemsize)
+ }
+ mp.mallocing = 0
+ releasem(mp)
+
+ if checkGCTrigger {
+ if t := (gcTrigger{kind: gcTriggerHeap}); t.test() {
+ gcStart(t)
+ }
+ }
+ if gcBlackenEnabled != 0 && elemsize != 0 {
+ if assistG := getg().m.curg; assistG != nil {
+ assistG.gcAssistBytes -= int64(elemsize - size)
+ }
+ }
+
+ if debug.malloc {
+ postMallocgcDebug(x, elemsize, typ)
+ }
+ return x
+}
+
+// mallocgcSmallScanNoHeaderSC18 allocates an object in size class 18
+// (256-byte slots) for a scannable (pointer-containing) type that does
+// not need a malloc header. Generated specialization of the generic
+// small-scan-no-header malloc path: sizeclass and elemsize are baked in
+// as constants, and the free-slot fast path and heap-bitmap write are
+// inlined. The needzero parameter is unused here; zeroing is driven by
+// span.needzero instead.
+func mallocgcSmallScanNoHeaderSC18(size uintptr, typ *_type, needzero bool) unsafe.Pointer {
+ if doubleCheckMalloc {
+ if gcphase == _GCmarktermination {
+ throw("mallocgc called with gcphase == _GCmarktermination")
+ }
+ }
+
+ // Allocation can queue a finalizer; tell the lock-rank checker up front.
+ lockRankMayQueueFinalizer()
+
+ if debug.malloc {
+ if x := preMallocgcDebug(size, typ); x != nil {
+ return x
+ }
+ }
+
+ // If the GC mark phase is active, charge this allocation against the
+ // goroutine's assist credit.
+ if gcBlackenEnabled != 0 {
+ deductAssistCredit(size)
+ }
+
+ const sizeclass = 18
+
+ const elemsize = 256
+
+ // Pin to the current M (disables preemption) while we use its mcache;
+ // released via releasem below.
+ mp := acquirem()
+ if doubleCheckMalloc {
+ doubleCheckSmallScanNoHeader(size, typ, mp)
+ }
+ mp.mallocing = 1
+
+ checkGCTrigger := false
+ c := getMCache(mp)
+ const spc = spanClass(sizeclass<<1) | spanClass(0)
+ span := c.alloc[spc]
+
+ // Inlined fast path (cf. nextFreeFast): pop the next free slot out of
+ // the span's 64-bit allocCache bitmap.
+ var nextFreeFastResult gclinkptr
+ if span.allocCache != 0 {
+ theBit := sys.TrailingZeros64(span.allocCache)
+ result := span.freeindex + uint16(theBit)
+ if result < span.nelems {
+ freeidx := result + 1
+ // Only take the fast path if it does not exhaust the current cache
+ // word mid-span (that case needs a cache refill in the slow path).
+ if !(freeidx%64 == 0 && freeidx != span.nelems) {
+ span.allocCache >>= uint(theBit + 1)
+ span.freeindex = freeidx
+ span.allocCount++
+ nextFreeFastResult = gclinkptr(uintptr(result)*
+ 256 +
+ span.base())
+ }
+ }
+ }
+ v := nextFreeFastResult
+ if v == 0 {
+ // Fast path failed: slow path through the mcache, which may refill
+ // the span and ask us to check the GC trigger afterwards.
+ v, span, checkGCTrigger = c.nextFree(spc)
+ }
+ x := unsafe.Pointer(v)
+ if span.needzero != 0 {
+ memclrNoHeapPointers(x, elemsize)
+ }
+ // sizeclass is 18 in this specialization, so the first branch is dead
+ // code kept by the generator; the else branch is an inlined
+ // writeHeapBitsSmall, building the type's pointer bitmap and storing
+ // it into the span's packed heap bits.
+ if goarch.PtrSize == 8 && sizeclass == 1 {
+
+ c.scanAlloc += 8
+ } else {
+ dataSize := size
+ x := uintptr(x)
+
+ if doubleCheckHeapSetType && (!heapBitsInSpan(dataSize) || !heapBitsInSpan(256)) {
+ throw("tried to write heap bits, but no heap bits in span")
+ }
+
+ src0 := readUintptr(getGCMask(typ))
+
+ const elemsize = 256
+
+ // Replicate the type's one-word GC mask across every element that
+ // fits in dataSize.
+ scanSize := typ.PtrBytes
+ src := src0
+ if typ.Size_ == goarch.PtrSize {
+ src = (1 << (dataSize / goarch.PtrSize)) - 1
+ } else {
+
+ if doubleCheckHeapSetType && !asanenabled && dataSize%typ.Size_ != 0 {
+ throw("runtime: (*mspan).writeHeapBitsSmall: dataSize is not a multiple of typ.Size_")
+ }
+ for i := typ.Size_; i < dataSize; i += typ.Size_ {
+ src |= src0 << (i / goarch.PtrSize)
+ scanSize += typ.Size_
+ }
+ }
+
+ dstBase, _ := spanHeapBitsRange(span.base(), pageSize, elemsize)
+ dst := unsafe.Pointer(dstBase)
+ o := (x - span.base()) / goarch.PtrSize
+ i := o / ptrBits
+ j := o % ptrBits
+ const bits uintptr = elemsize / goarch.PtrSize
+
+ // The object's bitmap may straddle two words of the heap bitmap.
+ const bitsIsPowerOfTwo = bits&(bits-1) == 0
+ if bits > ptrBits || (!bitsIsPowerOfTwo && j+bits > ptrBits) {
+
+ bits0 := ptrBits - j
+ bits1 := bits - bits0
+ dst0 := (*uintptr)(add(dst, (i+0)*goarch.PtrSize))
+ dst1 := (*uintptr)(add(dst, (i+1)*goarch.PtrSize))
+ *dst0 = (*dst0)&(^uintptr(0)>>bits0) | (src << j)
+ *dst1 = (*dst1)&^((1<<bits1)-1) | (src >> bits0)
+ } else {
+
+ dst := (*uintptr)(add(dst, i*goarch.PtrSize))
+ *dst = (*dst)&^(((1<<(min(bits, ptrBits)))-1)<<j) | (src << j)
+ }
+
+ const doubleCheck = false
+ if doubleCheck {
+ writeHeapBitsDoubleCheck(span, x, dataSize, src, src0, i, j, bits, typ)
+ }
+ if doubleCheckHeapSetType {
+ doubleCheckHeapType(x, dataSize, typ, nil, span)
+ }
+ c.scanAlloc += scanSize
+ }
+
+ // Make the zeroing and heap-bit writes above visible before the new
+ // object can be observed by other goroutines or the GC.
+ publicationBarrier()
+
+ // With an active GC, mark the new object (gcmarknewobject); otherwise
+ // just publish the updated freeindex to the scanner.
+ if writeBarrier.enabled {
+
+ gcmarknewobject(span, uintptr(x))
+ } else {
+
+ span.freeIndexForScan = span.freeindex
+ }
+
+ // Memory-profile sampling check.
+ c.nextSample -= int64(elemsize)
+ if c.nextSample < 0 || MemProfileRate != c.memProfRate {
+ profilealloc(mp, x, elemsize)
+ }
+ mp.mallocing = 0
+ releasem(mp)
+
+ if checkGCTrigger {
+ if t := (gcTrigger{kind: gcTriggerHeap}); t.test() {
+ gcStart(t)
+ }
+ }
+ // Charge the size-class rounding waste (elemsize - size) to assist
+ // credit; deductAssistCredit above only charged the requested size.
+ if gcBlackenEnabled != 0 && elemsize != 0 {
+ if assistG := getg().m.curg; assistG != nil {
+ assistG.gcAssistBytes -= int64(elemsize - size)
+ }
+ }
+
+ if debug.malloc {
+ postMallocgcDebug(x, elemsize, typ)
+ }
+ return x
+}
+
+// mallocgcSmallScanNoHeaderSC19 allocates an object in size class 19
+// (288-byte slots) for a scannable (pointer-containing) type that does
+// not need a malloc header. Generated specialization of the generic
+// small-scan-no-header malloc path: sizeclass and elemsize are baked in
+// as constants, and the free-slot fast path and heap-bitmap write are
+// inlined. The needzero parameter is unused here; zeroing is driven by
+// span.needzero instead.
+func mallocgcSmallScanNoHeaderSC19(size uintptr, typ *_type, needzero bool) unsafe.Pointer {
+ if doubleCheckMalloc {
+ if gcphase == _GCmarktermination {
+ throw("mallocgc called with gcphase == _GCmarktermination")
+ }
+ }
+
+ // Allocation can queue a finalizer; tell the lock-rank checker up front.
+ lockRankMayQueueFinalizer()
+
+ if debug.malloc {
+ if x := preMallocgcDebug(size, typ); x != nil {
+ return x
+ }
+ }
+
+ // If the GC mark phase is active, charge this allocation against the
+ // goroutine's assist credit.
+ if gcBlackenEnabled != 0 {
+ deductAssistCredit(size)
+ }
+
+ const sizeclass = 19
+
+ const elemsize = 288
+
+ // Pin to the current M (disables preemption) while we use its mcache;
+ // released via releasem below.
+ mp := acquirem()
+ if doubleCheckMalloc {
+ doubleCheckSmallScanNoHeader(size, typ, mp)
+ }
+ mp.mallocing = 1
+
+ checkGCTrigger := false
+ c := getMCache(mp)
+ const spc = spanClass(sizeclass<<1) | spanClass(0)
+ span := c.alloc[spc]
+
+ // Inlined fast path (cf. nextFreeFast): pop the next free slot out of
+ // the span's 64-bit allocCache bitmap.
+ var nextFreeFastResult gclinkptr
+ if span.allocCache != 0 {
+ theBit := sys.TrailingZeros64(span.allocCache)
+ result := span.freeindex + uint16(theBit)
+ if result < span.nelems {
+ freeidx := result + 1
+ // Only take the fast path if it does not exhaust the current cache
+ // word mid-span (that case needs a cache refill in the slow path).
+ if !(freeidx%64 == 0 && freeidx != span.nelems) {
+ span.allocCache >>= uint(theBit + 1)
+ span.freeindex = freeidx
+ span.allocCount++
+ nextFreeFastResult = gclinkptr(uintptr(result)*
+ 288 +
+ span.base())
+ }
+ }
+ }
+ v := nextFreeFastResult
+ if v == 0 {
+ // Fast path failed: slow path through the mcache, which may refill
+ // the span and ask us to check the GC trigger afterwards.
+ v, span, checkGCTrigger = c.nextFree(spc)
+ }
+ x := unsafe.Pointer(v)
+ if span.needzero != 0 {
+ memclrNoHeapPointers(x, elemsize)
+ }
+ // sizeclass is 19 in this specialization, so the first branch is dead
+ // code kept by the generator; the else branch is an inlined
+ // writeHeapBitsSmall, building the type's pointer bitmap and storing
+ // it into the span's packed heap bits.
+ if goarch.PtrSize == 8 && sizeclass == 1 {
+
+ c.scanAlloc += 8
+ } else {
+ dataSize := size
+ x := uintptr(x)
+
+ if doubleCheckHeapSetType && (!heapBitsInSpan(dataSize) || !heapBitsInSpan(288)) {
+ throw("tried to write heap bits, but no heap bits in span")
+ }
+
+ src0 := readUintptr(getGCMask(typ))
+
+ const elemsize = 288
+
+ // Replicate the type's one-word GC mask across every element that
+ // fits in dataSize.
+ scanSize := typ.PtrBytes
+ src := src0
+ if typ.Size_ == goarch.PtrSize {
+ src = (1 << (dataSize / goarch.PtrSize)) - 1
+ } else {
+
+ if doubleCheckHeapSetType && !asanenabled && dataSize%typ.Size_ != 0 {
+ throw("runtime: (*mspan).writeHeapBitsSmall: dataSize is not a multiple of typ.Size_")
+ }
+ for i := typ.Size_; i < dataSize; i += typ.Size_ {
+ src |= src0 << (i / goarch.PtrSize)
+ scanSize += typ.Size_
+ }
+ }
+
+ dstBase, _ := spanHeapBitsRange(span.base(), pageSize, elemsize)
+ dst := unsafe.Pointer(dstBase)
+ o := (x - span.base()) / goarch.PtrSize
+ i := o / ptrBits
+ j := o % ptrBits
+ const bits uintptr = elemsize / goarch.PtrSize
+
+ // The object's bitmap may straddle two words of the heap bitmap.
+ const bitsIsPowerOfTwo = bits&(bits-1) == 0
+ if bits > ptrBits || (!bitsIsPowerOfTwo && j+bits > ptrBits) {
+
+ bits0 := ptrBits - j
+ bits1 := bits - bits0
+ dst0 := (*uintptr)(add(dst, (i+0)*goarch.PtrSize))
+ dst1 := (*uintptr)(add(dst, (i+1)*goarch.PtrSize))
+ *dst0 = (*dst0)&(^uintptr(0)>>bits0) | (src << j)
+ *dst1 = (*dst1)&^((1<<bits1)-1) | (src >> bits0)
+ } else {
+
+ dst := (*uintptr)(add(dst, i*goarch.PtrSize))
+ *dst = (*dst)&^(((1<<(min(bits, ptrBits)))-1)<<j) | (src << j)
+ }
+
+ const doubleCheck = false
+ if doubleCheck {
+ writeHeapBitsDoubleCheck(span, x, dataSize, src, src0, i, j, bits, typ)
+ }
+ if doubleCheckHeapSetType {
+ doubleCheckHeapType(x, dataSize, typ, nil, span)
+ }
+ c.scanAlloc += scanSize
+ }
+
+ // Make the zeroing and heap-bit writes above visible before the new
+ // object can be observed by other goroutines or the GC.
+ publicationBarrier()
+
+ // With an active GC, mark the new object (gcmarknewobject); otherwise
+ // just publish the updated freeindex to the scanner.
+ if writeBarrier.enabled {
+
+ gcmarknewobject(span, uintptr(x))
+ } else {
+
+ span.freeIndexForScan = span.freeindex
+ }
+
+ // Memory-profile sampling check.
+ c.nextSample -= int64(elemsize)
+ if c.nextSample < 0 || MemProfileRate != c.memProfRate {
+ profilealloc(mp, x, elemsize)
+ }
+ mp.mallocing = 0
+ releasem(mp)
+
+ if checkGCTrigger {
+ if t := (gcTrigger{kind: gcTriggerHeap}); t.test() {
+ gcStart(t)
+ }
+ }
+ // Charge the size-class rounding waste (elemsize - size) to assist
+ // credit; deductAssistCredit above only charged the requested size.
+ if gcBlackenEnabled != 0 && elemsize != 0 {
+ if assistG := getg().m.curg; assistG != nil {
+ assistG.gcAssistBytes -= int64(elemsize - size)
+ }
+ }
+
+ if debug.malloc {
+ postMallocgcDebug(x, elemsize, typ)
+ }
+ return x
+}
+
+// mallocgcSmallScanNoHeaderSC20 allocates an object in size class 20
+// (320-byte slots) for a scannable (pointer-containing) type that does
+// not need a malloc header. Generated specialization of the generic
+// small-scan-no-header malloc path: sizeclass and elemsize are baked in
+// as constants, and the free-slot fast path and heap-bitmap write are
+// inlined. The needzero parameter is unused here; zeroing is driven by
+// span.needzero instead.
+func mallocgcSmallScanNoHeaderSC20(size uintptr, typ *_type, needzero bool) unsafe.Pointer {
+ if doubleCheckMalloc {
+ if gcphase == _GCmarktermination {
+ throw("mallocgc called with gcphase == _GCmarktermination")
+ }
+ }
+
+ // Allocation can queue a finalizer; tell the lock-rank checker up front.
+ lockRankMayQueueFinalizer()
+
+ if debug.malloc {
+ if x := preMallocgcDebug(size, typ); x != nil {
+ return x
+ }
+ }
+
+ // If the GC mark phase is active, charge this allocation against the
+ // goroutine's assist credit.
+ if gcBlackenEnabled != 0 {
+ deductAssistCredit(size)
+ }
+
+ const sizeclass = 20
+
+ const elemsize = 320
+
+ // Pin to the current M (disables preemption) while we use its mcache;
+ // released via releasem below.
+ mp := acquirem()
+ if doubleCheckMalloc {
+ doubleCheckSmallScanNoHeader(size, typ, mp)
+ }
+ mp.mallocing = 1
+
+ checkGCTrigger := false
+ c := getMCache(mp)
+ const spc = spanClass(sizeclass<<1) | spanClass(0)
+ span := c.alloc[spc]
+
+ // Inlined fast path (cf. nextFreeFast): pop the next free slot out of
+ // the span's 64-bit allocCache bitmap.
+ var nextFreeFastResult gclinkptr
+ if span.allocCache != 0 {
+ theBit := sys.TrailingZeros64(span.allocCache)
+ result := span.freeindex + uint16(theBit)
+ if result < span.nelems {
+ freeidx := result + 1
+ // Only take the fast path if it does not exhaust the current cache
+ // word mid-span (that case needs a cache refill in the slow path).
+ if !(freeidx%64 == 0 && freeidx != span.nelems) {
+ span.allocCache >>= uint(theBit + 1)
+ span.freeindex = freeidx
+ span.allocCount++
+ nextFreeFastResult = gclinkptr(uintptr(result)*
+ 320 +
+ span.base())
+ }
+ }
+ }
+ v := nextFreeFastResult
+ if v == 0 {
+ // Fast path failed: slow path through the mcache, which may refill
+ // the span and ask us to check the GC trigger afterwards.
+ v, span, checkGCTrigger = c.nextFree(spc)
+ }
+ x := unsafe.Pointer(v)
+ if span.needzero != 0 {
+ memclrNoHeapPointers(x, elemsize)
+ }
+ // sizeclass is 20 in this specialization, so the first branch is dead
+ // code kept by the generator; the else branch is an inlined
+ // writeHeapBitsSmall, building the type's pointer bitmap and storing
+ // it into the span's packed heap bits.
+ if goarch.PtrSize == 8 && sizeclass == 1 {
+
+ c.scanAlloc += 8
+ } else {
+ dataSize := size
+ x := uintptr(x)
+
+ if doubleCheckHeapSetType && (!heapBitsInSpan(dataSize) || !heapBitsInSpan(320)) {
+ throw("tried to write heap bits, but no heap bits in span")
+ }
+
+ src0 := readUintptr(getGCMask(typ))
+
+ const elemsize = 320
+
+ // Replicate the type's one-word GC mask across every element that
+ // fits in dataSize.
+ scanSize := typ.PtrBytes
+ src := src0
+ if typ.Size_ == goarch.PtrSize {
+ src = (1 << (dataSize / goarch.PtrSize)) - 1
+ } else {
+
+ if doubleCheckHeapSetType && !asanenabled && dataSize%typ.Size_ != 0 {
+ throw("runtime: (*mspan).writeHeapBitsSmall: dataSize is not a multiple of typ.Size_")
+ }
+ for i := typ.Size_; i < dataSize; i += typ.Size_ {
+ src |= src0 << (i / goarch.PtrSize)
+ scanSize += typ.Size_
+ }
+ }
+
+ dstBase, _ := spanHeapBitsRange(span.base(), pageSize, elemsize)
+ dst := unsafe.Pointer(dstBase)
+ o := (x - span.base()) / goarch.PtrSize
+ i := o / ptrBits
+ j := o % ptrBits
+ const bits uintptr = elemsize / goarch.PtrSize
+
+ // The object's bitmap may straddle two words of the heap bitmap.
+ const bitsIsPowerOfTwo = bits&(bits-1) == 0
+ if bits > ptrBits || (!bitsIsPowerOfTwo && j+bits > ptrBits) {
+
+ bits0 := ptrBits - j
+ bits1 := bits - bits0
+ dst0 := (*uintptr)(add(dst, (i+0)*goarch.PtrSize))
+ dst1 := (*uintptr)(add(dst, (i+1)*goarch.PtrSize))
+ *dst0 = (*dst0)&(^uintptr(0)>>bits0) | (src << j)
+ *dst1 = (*dst1)&^((1<<bits1)-1) | (src >> bits0)
+ } else {
+
+ dst := (*uintptr)(add(dst, i*goarch.PtrSize))
+ *dst = (*dst)&^(((1<<(min(bits, ptrBits)))-1)<<j) | (src << j)
+ }
+
+ const doubleCheck = false
+ if doubleCheck {
+ writeHeapBitsDoubleCheck(span, x, dataSize, src, src0, i, j, bits, typ)
+ }
+ if doubleCheckHeapSetType {
+ doubleCheckHeapType(x, dataSize, typ, nil, span)
+ }
+ c.scanAlloc += scanSize
+ }
+
+ // Make the zeroing and heap-bit writes above visible before the new
+ // object can be observed by other goroutines or the GC.
+ publicationBarrier()
+
+ // With an active GC, mark the new object (gcmarknewobject); otherwise
+ // just publish the updated freeindex to the scanner.
+ if writeBarrier.enabled {
+
+ gcmarknewobject(span, uintptr(x))
+ } else {
+
+ span.freeIndexForScan = span.freeindex
+ }
+
+ // Memory-profile sampling check.
+ c.nextSample -= int64(elemsize)
+ if c.nextSample < 0 || MemProfileRate != c.memProfRate {
+ profilealloc(mp, x, elemsize)
+ }
+ mp.mallocing = 0
+ releasem(mp)
+
+ if checkGCTrigger {
+ if t := (gcTrigger{kind: gcTriggerHeap}); t.test() {
+ gcStart(t)
+ }
+ }
+ // Charge the size-class rounding waste (elemsize - size) to assist
+ // credit; deductAssistCredit above only charged the requested size.
+ if gcBlackenEnabled != 0 && elemsize != 0 {
+ if assistG := getg().m.curg; assistG != nil {
+ assistG.gcAssistBytes -= int64(elemsize - size)
+ }
+ }
+
+ if debug.malloc {
+ postMallocgcDebug(x, elemsize, typ)
+ }
+ return x
+}
+
+// mallocgcSmallScanNoHeaderSC21 allocates an object in size class 21
+// (352-byte slots) for a scannable (pointer-containing) type that does
+// not need a malloc header. Generated specialization of the generic
+// small-scan-no-header malloc path: sizeclass and elemsize are baked in
+// as constants, and the free-slot fast path and heap-bitmap write are
+// inlined. The needzero parameter is unused here; zeroing is driven by
+// span.needzero instead.
+func mallocgcSmallScanNoHeaderSC21(size uintptr, typ *_type, needzero bool) unsafe.Pointer {
+ if doubleCheckMalloc {
+ if gcphase == _GCmarktermination {
+ throw("mallocgc called with gcphase == _GCmarktermination")
+ }
+ }
+
+ // Allocation can queue a finalizer; tell the lock-rank checker up front.
+ lockRankMayQueueFinalizer()
+
+ if debug.malloc {
+ if x := preMallocgcDebug(size, typ); x != nil {
+ return x
+ }
+ }
+
+ // If the GC mark phase is active, charge this allocation against the
+ // goroutine's assist credit.
+ if gcBlackenEnabled != 0 {
+ deductAssistCredit(size)
+ }
+
+ const sizeclass = 21
+
+ const elemsize = 352
+
+ // Pin to the current M (disables preemption) while we use its mcache;
+ // released via releasem below.
+ mp := acquirem()
+ if doubleCheckMalloc {
+ doubleCheckSmallScanNoHeader(size, typ, mp)
+ }
+ mp.mallocing = 1
+
+ checkGCTrigger := false
+ c := getMCache(mp)
+ const spc = spanClass(sizeclass<<1) | spanClass(0)
+ span := c.alloc[spc]
+
+ // Inlined fast path (cf. nextFreeFast): pop the next free slot out of
+ // the span's 64-bit allocCache bitmap.
+ var nextFreeFastResult gclinkptr
+ if span.allocCache != 0 {
+ theBit := sys.TrailingZeros64(span.allocCache)
+ result := span.freeindex + uint16(theBit)
+ if result < span.nelems {
+ freeidx := result + 1
+ // Only take the fast path if it does not exhaust the current cache
+ // word mid-span (that case needs a cache refill in the slow path).
+ if !(freeidx%64 == 0 && freeidx != span.nelems) {
+ span.allocCache >>= uint(theBit + 1)
+ span.freeindex = freeidx
+ span.allocCount++
+ nextFreeFastResult = gclinkptr(uintptr(result)*
+ 352 +
+ span.base())
+ }
+ }
+ }
+ v := nextFreeFastResult
+ if v == 0 {
+ // Fast path failed: slow path through the mcache, which may refill
+ // the span and ask us to check the GC trigger afterwards.
+ v, span, checkGCTrigger = c.nextFree(spc)
+ }
+ x := unsafe.Pointer(v)
+ if span.needzero != 0 {
+ memclrNoHeapPointers(x, elemsize)
+ }
+ // sizeclass is 21 in this specialization, so the first branch is dead
+ // code kept by the generator; the else branch is an inlined
+ // writeHeapBitsSmall, building the type's pointer bitmap and storing
+ // it into the span's packed heap bits.
+ if goarch.PtrSize == 8 && sizeclass == 1 {
+
+ c.scanAlloc += 8
+ } else {
+ dataSize := size
+ x := uintptr(x)
+
+ if doubleCheckHeapSetType && (!heapBitsInSpan(dataSize) || !heapBitsInSpan(352)) {
+ throw("tried to write heap bits, but no heap bits in span")
+ }
+
+ src0 := readUintptr(getGCMask(typ))
+
+ const elemsize = 352
+
+ // Replicate the type's one-word GC mask across every element that
+ // fits in dataSize.
+ scanSize := typ.PtrBytes
+ src := src0
+ if typ.Size_ == goarch.PtrSize {
+ src = (1 << (dataSize / goarch.PtrSize)) - 1
+ } else {
+
+ if doubleCheckHeapSetType && !asanenabled && dataSize%typ.Size_ != 0 {
+ throw("runtime: (*mspan).writeHeapBitsSmall: dataSize is not a multiple of typ.Size_")
+ }
+ for i := typ.Size_; i < dataSize; i += typ.Size_ {
+ src |= src0 << (i / goarch.PtrSize)
+ scanSize += typ.Size_
+ }
+ }
+
+ dstBase, _ := spanHeapBitsRange(span.base(), pageSize, elemsize)
+ dst := unsafe.Pointer(dstBase)
+ o := (x - span.base()) / goarch.PtrSize
+ i := o / ptrBits
+ j := o % ptrBits
+ const bits uintptr = elemsize / goarch.PtrSize
+
+ // The object's bitmap may straddle two words of the heap bitmap.
+ const bitsIsPowerOfTwo = bits&(bits-1) == 0
+ if bits > ptrBits || (!bitsIsPowerOfTwo && j+bits > ptrBits) {
+
+ bits0 := ptrBits - j
+ bits1 := bits - bits0
+ dst0 := (*uintptr)(add(dst, (i+0)*goarch.PtrSize))
+ dst1 := (*uintptr)(add(dst, (i+1)*goarch.PtrSize))
+ *dst0 = (*dst0)&(^uintptr(0)>>bits0) | (src << j)
+ *dst1 = (*dst1)&^((1<<bits1)-1) | (src >> bits0)
+ } else {
+
+ dst := (*uintptr)(add(dst, i*goarch.PtrSize))
+ *dst = (*dst)&^(((1<<(min(bits, ptrBits)))-1)<<j) | (src << j)
+ }
+
+ const doubleCheck = false
+ if doubleCheck {
+ writeHeapBitsDoubleCheck(span, x, dataSize, src, src0, i, j, bits, typ)
+ }
+ if doubleCheckHeapSetType {
+ doubleCheckHeapType(x, dataSize, typ, nil, span)
+ }
+ c.scanAlloc += scanSize
+ }
+
+ // Make the zeroing and heap-bit writes above visible before the new
+ // object can be observed by other goroutines or the GC.
+ publicationBarrier()
+
+ // With an active GC, mark the new object (gcmarknewobject); otherwise
+ // just publish the updated freeindex to the scanner.
+ if writeBarrier.enabled {
+
+ gcmarknewobject(span, uintptr(x))
+ } else {
+
+ span.freeIndexForScan = span.freeindex
+ }
+
+ // Memory-profile sampling check.
+ c.nextSample -= int64(elemsize)
+ if c.nextSample < 0 || MemProfileRate != c.memProfRate {
+ profilealloc(mp, x, elemsize)
+ }
+ mp.mallocing = 0
+ releasem(mp)
+
+ if checkGCTrigger {
+ if t := (gcTrigger{kind: gcTriggerHeap}); t.test() {
+ gcStart(t)
+ }
+ }
+ // Charge the size-class rounding waste (elemsize - size) to assist
+ // credit; deductAssistCredit above only charged the requested size.
+ if gcBlackenEnabled != 0 && elemsize != 0 {
+ if assistG := getg().m.curg; assistG != nil {
+ assistG.gcAssistBytes -= int64(elemsize - size)
+ }
+ }
+
+ if debug.malloc {
+ postMallocgcDebug(x, elemsize, typ)
+ }
+ return x
+}
+
+// mallocgcSmallScanNoHeaderSC22 allocates an object in size class 22
+// (384-byte slots) for a scannable (pointer-containing) type that does
+// not need a malloc header. Generated specialization of the generic
+// small-scan-no-header malloc path: sizeclass and elemsize are baked in
+// as constants, and the free-slot fast path and heap-bitmap write are
+// inlined. The needzero parameter is unused here; zeroing is driven by
+// span.needzero instead.
+func mallocgcSmallScanNoHeaderSC22(size uintptr, typ *_type, needzero bool) unsafe.Pointer {
+ if doubleCheckMalloc {
+ if gcphase == _GCmarktermination {
+ throw("mallocgc called with gcphase == _GCmarktermination")
+ }
+ }
+
+ // Allocation can queue a finalizer; tell the lock-rank checker up front.
+ lockRankMayQueueFinalizer()
+
+ if debug.malloc {
+ if x := preMallocgcDebug(size, typ); x != nil {
+ return x
+ }
+ }
+
+ // If the GC mark phase is active, charge this allocation against the
+ // goroutine's assist credit.
+ if gcBlackenEnabled != 0 {
+ deductAssistCredit(size)
+ }
+
+ const sizeclass = 22
+
+ const elemsize = 384
+
+ // Pin to the current M (disables preemption) while we use its mcache;
+ // released via releasem below.
+ mp := acquirem()
+ if doubleCheckMalloc {
+ doubleCheckSmallScanNoHeader(size, typ, mp)
+ }
+ mp.mallocing = 1
+
+ checkGCTrigger := false
+ c := getMCache(mp)
+ const spc = spanClass(sizeclass<<1) | spanClass(0)
+ span := c.alloc[spc]
+
+ // Inlined fast path (cf. nextFreeFast): pop the next free slot out of
+ // the span's 64-bit allocCache bitmap.
+ var nextFreeFastResult gclinkptr
+ if span.allocCache != 0 {
+ theBit := sys.TrailingZeros64(span.allocCache)
+ result := span.freeindex + uint16(theBit)
+ if result < span.nelems {
+ freeidx := result + 1
+ // Only take the fast path if it does not exhaust the current cache
+ // word mid-span (that case needs a cache refill in the slow path).
+ if !(freeidx%64 == 0 && freeidx != span.nelems) {
+ span.allocCache >>= uint(theBit + 1)
+ span.freeindex = freeidx
+ span.allocCount++
+ nextFreeFastResult = gclinkptr(uintptr(result)*
+ 384 +
+ span.base())
+ }
+ }
+ }
+ v := nextFreeFastResult
+ if v == 0 {
+ // Fast path failed: slow path through the mcache, which may refill
+ // the span and ask us to check the GC trigger afterwards.
+ v, span, checkGCTrigger = c.nextFree(spc)
+ }
+ x := unsafe.Pointer(v)
+ if span.needzero != 0 {
+ memclrNoHeapPointers(x, elemsize)
+ }
+ // sizeclass is 22 in this specialization, so the first branch is dead
+ // code kept by the generator; the else branch is an inlined
+ // writeHeapBitsSmall, building the type's pointer bitmap and storing
+ // it into the span's packed heap bits.
+ if goarch.PtrSize == 8 && sizeclass == 1 {
+
+ c.scanAlloc += 8
+ } else {
+ dataSize := size
+ x := uintptr(x)
+
+ if doubleCheckHeapSetType && (!heapBitsInSpan(dataSize) || !heapBitsInSpan(384)) {
+ throw("tried to write heap bits, but no heap bits in span")
+ }
+
+ src0 := readUintptr(getGCMask(typ))
+
+ const elemsize = 384
+
+ // Replicate the type's one-word GC mask across every element that
+ // fits in dataSize.
+ scanSize := typ.PtrBytes
+ src := src0
+ if typ.Size_ == goarch.PtrSize {
+ src = (1 << (dataSize / goarch.PtrSize)) - 1
+ } else {
+
+ if doubleCheckHeapSetType && !asanenabled && dataSize%typ.Size_ != 0 {
+ throw("runtime: (*mspan).writeHeapBitsSmall: dataSize is not a multiple of typ.Size_")
+ }
+ for i := typ.Size_; i < dataSize; i += typ.Size_ {
+ src |= src0 << (i / goarch.PtrSize)
+ scanSize += typ.Size_
+ }
+ }
+
+ dstBase, _ := spanHeapBitsRange(span.base(), pageSize, elemsize)
+ dst := unsafe.Pointer(dstBase)
+ o := (x - span.base()) / goarch.PtrSize
+ i := o / ptrBits
+ j := o % ptrBits
+ const bits uintptr = elemsize / goarch.PtrSize
+
+ // The object's bitmap may straddle two words of the heap bitmap.
+ const bitsIsPowerOfTwo = bits&(bits-1) == 0
+ if bits > ptrBits || (!bitsIsPowerOfTwo && j+bits > ptrBits) {
+
+ bits0 := ptrBits - j
+ bits1 := bits - bits0
+ dst0 := (*uintptr)(add(dst, (i+0)*goarch.PtrSize))
+ dst1 := (*uintptr)(add(dst, (i+1)*goarch.PtrSize))
+ *dst0 = (*dst0)&(^uintptr(0)>>bits0) | (src << j)
+ *dst1 = (*dst1)&^((1<<bits1)-1) | (src >> bits0)
+ } else {
+
+ dst := (*uintptr)(add(dst, i*goarch.PtrSize))
+ *dst = (*dst)&^(((1<<(min(bits, ptrBits)))-1)<<j) | (src << j)
+ }
+
+ const doubleCheck = false
+ if doubleCheck {
+ writeHeapBitsDoubleCheck(span, x, dataSize, src, src0, i, j, bits, typ)
+ }
+ if doubleCheckHeapSetType {
+ doubleCheckHeapType(x, dataSize, typ, nil, span)
+ }
+ c.scanAlloc += scanSize
+ }
+
+ // Make the zeroing and heap-bit writes above visible before the new
+ // object can be observed by other goroutines or the GC.
+ publicationBarrier()
+
+ // With an active GC, mark the new object (gcmarknewobject); otherwise
+ // just publish the updated freeindex to the scanner.
+ if writeBarrier.enabled {
+
+ gcmarknewobject(span, uintptr(x))
+ } else {
+
+ span.freeIndexForScan = span.freeindex
+ }
+
+ // Memory-profile sampling check.
+ c.nextSample -= int64(elemsize)
+ if c.nextSample < 0 || MemProfileRate != c.memProfRate {
+ profilealloc(mp, x, elemsize)
+ }
+ mp.mallocing = 0
+ releasem(mp)
+
+ if checkGCTrigger {
+ if t := (gcTrigger{kind: gcTriggerHeap}); t.test() {
+ gcStart(t)
+ }
+ }
+ // Charge the size-class rounding waste (elemsize - size) to assist
+ // credit; deductAssistCredit above only charged the requested size.
+ if gcBlackenEnabled != 0 && elemsize != 0 {
+ if assistG := getg().m.curg; assistG != nil {
+ assistG.gcAssistBytes -= int64(elemsize - size)
+ }
+ }
+
+ if debug.malloc {
+ postMallocgcDebug(x, elemsize, typ)
+ }
+ return x
+}
+
+// mallocgcSmallScanNoHeaderSC23 allocates an object in size class 23
+// (416-byte slots) for a scannable (pointer-containing) type that does
+// not need a malloc header. Generated specialization of the generic
+// small-scan-no-header malloc path: sizeclass and elemsize are baked in
+// as constants, and the free-slot fast path and heap-bitmap write are
+// inlined. The needzero parameter is unused here; zeroing is driven by
+// span.needzero instead.
+func mallocgcSmallScanNoHeaderSC23(size uintptr, typ *_type, needzero bool) unsafe.Pointer {
+ if doubleCheckMalloc {
+ if gcphase == _GCmarktermination {
+ throw("mallocgc called with gcphase == _GCmarktermination")
+ }
+ }
+
+ // Allocation can queue a finalizer; tell the lock-rank checker up front.
+ lockRankMayQueueFinalizer()
+
+ if debug.malloc {
+ if x := preMallocgcDebug(size, typ); x != nil {
+ return x
+ }
+ }
+
+ // If the GC mark phase is active, charge this allocation against the
+ // goroutine's assist credit.
+ if gcBlackenEnabled != 0 {
+ deductAssistCredit(size)
+ }
+
+ const sizeclass = 23
+
+ const elemsize = 416
+
+ // Pin to the current M (disables preemption) while we use its mcache;
+ // released via releasem below.
+ mp := acquirem()
+ if doubleCheckMalloc {
+ doubleCheckSmallScanNoHeader(size, typ, mp)
+ }
+ mp.mallocing = 1
+
+ checkGCTrigger := false
+ c := getMCache(mp)
+ const spc = spanClass(sizeclass<<1) | spanClass(0)
+ span := c.alloc[spc]
+
+ // Inlined fast path (cf. nextFreeFast): pop the next free slot out of
+ // the span's 64-bit allocCache bitmap.
+ var nextFreeFastResult gclinkptr
+ if span.allocCache != 0 {
+ theBit := sys.TrailingZeros64(span.allocCache)
+ result := span.freeindex + uint16(theBit)
+ if result < span.nelems {
+ freeidx := result + 1
+ // Only take the fast path if it does not exhaust the current cache
+ // word mid-span (that case needs a cache refill in the slow path).
+ if !(freeidx%64 == 0 && freeidx != span.nelems) {
+ span.allocCache >>= uint(theBit + 1)
+ span.freeindex = freeidx
+ span.allocCount++
+ nextFreeFastResult = gclinkptr(uintptr(result)*
+ 416 +
+ span.base())
+ }
+ }
+ }
+ v := nextFreeFastResult
+ if v == 0 {
+ // Fast path failed: slow path through the mcache, which may refill
+ // the span and ask us to check the GC trigger afterwards.
+ v, span, checkGCTrigger = c.nextFree(spc)
+ }
+ x := unsafe.Pointer(v)
+ if span.needzero != 0 {
+ memclrNoHeapPointers(x, elemsize)
+ }
+ // sizeclass is 23 in this specialization, so the first branch is dead
+ // code kept by the generator; the else branch is an inlined
+ // writeHeapBitsSmall, building the type's pointer bitmap and storing
+ // it into the span's packed heap bits.
+ if goarch.PtrSize == 8 && sizeclass == 1 {
+
+ c.scanAlloc += 8
+ } else {
+ dataSize := size
+ x := uintptr(x)
+
+ if doubleCheckHeapSetType && (!heapBitsInSpan(dataSize) || !heapBitsInSpan(416)) {
+ throw("tried to write heap bits, but no heap bits in span")
+ }
+
+ src0 := readUintptr(getGCMask(typ))
+
+ const elemsize = 416
+
+ // Replicate the type's one-word GC mask across every element that
+ // fits in dataSize.
+ scanSize := typ.PtrBytes
+ src := src0
+ if typ.Size_ == goarch.PtrSize {
+ src = (1 << (dataSize / goarch.PtrSize)) - 1
+ } else {
+
+ if doubleCheckHeapSetType && !asanenabled && dataSize%typ.Size_ != 0 {
+ throw("runtime: (*mspan).writeHeapBitsSmall: dataSize is not a multiple of typ.Size_")
+ }
+ for i := typ.Size_; i < dataSize; i += typ.Size_ {
+ src |= src0 << (i / goarch.PtrSize)
+ scanSize += typ.Size_
+ }
+ }
+
+ dstBase, _ := spanHeapBitsRange(span.base(), pageSize, elemsize)
+ dst := unsafe.Pointer(dstBase)
+ o := (x - span.base()) / goarch.PtrSize
+ i := o / ptrBits
+ j := o % ptrBits
+ const bits uintptr = elemsize / goarch.PtrSize
+
+ // The object's bitmap may straddle two words of the heap bitmap.
+ const bitsIsPowerOfTwo = bits&(bits-1) == 0
+ if bits > ptrBits || (!bitsIsPowerOfTwo && j+bits > ptrBits) {
+
+ bits0 := ptrBits - j
+ bits1 := bits - bits0
+ dst0 := (*uintptr)(add(dst, (i+0)*goarch.PtrSize))
+ dst1 := (*uintptr)(add(dst, (i+1)*goarch.PtrSize))
+ *dst0 = (*dst0)&(^uintptr(0)>>bits0) | (src << j)
+ *dst1 = (*dst1)&^((1<<bits1)-1) | (src >> bits0)
+ } else {
+
+ dst := (*uintptr)(add(dst, i*goarch.PtrSize))
+ *dst = (*dst)&^(((1<<(min(bits, ptrBits)))-1)<<j) | (src << j)
+ }
+
+ const doubleCheck = false
+ if doubleCheck {
+ writeHeapBitsDoubleCheck(span, x, dataSize, src, src0, i, j, bits, typ)
+ }
+ if doubleCheckHeapSetType {
+ doubleCheckHeapType(x, dataSize, typ, nil, span)
+ }
+ c.scanAlloc += scanSize
+ }
+
+ // Make the zeroing and heap-bit writes above visible before the new
+ // object can be observed by other goroutines or the GC.
+ publicationBarrier()
+
+ // With an active GC, mark the new object (gcmarknewobject); otherwise
+ // just publish the updated freeindex to the scanner.
+ if writeBarrier.enabled {
+
+ gcmarknewobject(span, uintptr(x))
+ } else {
+
+ span.freeIndexForScan = span.freeindex
+ }
+
+ // Memory-profile sampling check.
+ c.nextSample -= int64(elemsize)
+ if c.nextSample < 0 || MemProfileRate != c.memProfRate {
+ profilealloc(mp, x, elemsize)
+ }
+ mp.mallocing = 0
+ releasem(mp)
+
+ if checkGCTrigger {
+ if t := (gcTrigger{kind: gcTriggerHeap}); t.test() {
+ gcStart(t)
+ }
+ }
+ // Charge the size-class rounding waste (elemsize - size) to assist
+ // credit; deductAssistCredit above only charged the requested size.
+ if gcBlackenEnabled != 0 && elemsize != 0 {
+ if assistG := getg().m.curg; assistG != nil {
+ assistG.gcAssistBytes -= int64(elemsize - size)
+ }
+ }
+
+ if debug.malloc {
+ postMallocgcDebug(x, elemsize, typ)
+ }
+ return x
+}
+
+// mallocgcSmallScanNoHeaderSC24 allocates an object in size class 24
+// (448-byte slots) for a scannable (pointer-containing) type that does
+// not need a malloc header. Generated specialization of the generic
+// small-scan-no-header malloc path: sizeclass and elemsize are baked in
+// as constants, and the free-slot fast path and heap-bitmap write are
+// inlined. The needzero parameter is unused here; zeroing is driven by
+// span.needzero instead.
+func mallocgcSmallScanNoHeaderSC24(size uintptr, typ *_type, needzero bool) unsafe.Pointer {
+ if doubleCheckMalloc {
+ if gcphase == _GCmarktermination {
+ throw("mallocgc called with gcphase == _GCmarktermination")
+ }
+ }
+
+ // Allocation can queue a finalizer; tell the lock-rank checker up front.
+ lockRankMayQueueFinalizer()
+
+ if debug.malloc {
+ if x := preMallocgcDebug(size, typ); x != nil {
+ return x
+ }
+ }
+
+ // If the GC mark phase is active, charge this allocation against the
+ // goroutine's assist credit.
+ if gcBlackenEnabled != 0 {
+ deductAssistCredit(size)
+ }
+
+ const sizeclass = 24
+
+ const elemsize = 448
+
+ // Pin to the current M (disables preemption) while we use its mcache;
+ // released via releasem below.
+ mp := acquirem()
+ if doubleCheckMalloc {
+ doubleCheckSmallScanNoHeader(size, typ, mp)
+ }
+ mp.mallocing = 1
+
+ checkGCTrigger := false
+ c := getMCache(mp)
+ const spc = spanClass(sizeclass<<1) | spanClass(0)
+ span := c.alloc[spc]
+
+ // Inlined fast path (cf. nextFreeFast): pop the next free slot out of
+ // the span's 64-bit allocCache bitmap.
+ var nextFreeFastResult gclinkptr
+ if span.allocCache != 0 {
+ theBit := sys.TrailingZeros64(span.allocCache)
+ result := span.freeindex + uint16(theBit)
+ if result < span.nelems {
+ freeidx := result + 1
+ // Only take the fast path if it does not exhaust the current cache
+ // word mid-span (that case needs a cache refill in the slow path).
+ if !(freeidx%64 == 0 && freeidx != span.nelems) {
+ span.allocCache >>= uint(theBit + 1)
+ span.freeindex = freeidx
+ span.allocCount++
+ nextFreeFastResult = gclinkptr(uintptr(result)*
+ 448 +
+ span.base())
+ }
+ }
+ }
+ v := nextFreeFastResult
+ if v == 0 {
+ // Fast path failed: slow path through the mcache, which may refill
+ // the span and ask us to check the GC trigger afterwards.
+ v, span, checkGCTrigger = c.nextFree(spc)
+ }
+ x := unsafe.Pointer(v)
+ if span.needzero != 0 {
+ memclrNoHeapPointers(x, elemsize)
+ }
+ // sizeclass is 24 in this specialization, so the first branch is dead
+ // code kept by the generator; the else branch is an inlined
+ // writeHeapBitsSmall, building the type's pointer bitmap and storing
+ // it into the span's packed heap bits.
+ if goarch.PtrSize == 8 && sizeclass == 1 {
+
+ c.scanAlloc += 8
+ } else {
+ dataSize := size
+ x := uintptr(x)
+
+ if doubleCheckHeapSetType && (!heapBitsInSpan(dataSize) || !heapBitsInSpan(448)) {
+ throw("tried to write heap bits, but no heap bits in span")
+ }
+
+ src0 := readUintptr(getGCMask(typ))
+
+ const elemsize = 448
+
+ // Replicate the type's one-word GC mask across every element that
+ // fits in dataSize.
+ scanSize := typ.PtrBytes
+ src := src0
+ if typ.Size_ == goarch.PtrSize {
+ src = (1 << (dataSize / goarch.PtrSize)) - 1
+ } else {
+
+ if doubleCheckHeapSetType && !asanenabled && dataSize%typ.Size_ != 0 {
+ throw("runtime: (*mspan).writeHeapBitsSmall: dataSize is not a multiple of typ.Size_")
+ }
+ for i := typ.Size_; i < dataSize; i += typ.Size_ {
+ src |= src0 << (i / goarch.PtrSize)
+ scanSize += typ.Size_
+ }
+ }
+
+ dstBase, _ := spanHeapBitsRange(span.base(), pageSize, elemsize)
+ dst := unsafe.Pointer(dstBase)
+ o := (x - span.base()) / goarch.PtrSize
+ i := o / ptrBits
+ j := o % ptrBits
+ const bits uintptr = elemsize / goarch.PtrSize
+
+ // The object's bitmap may straddle two words of the heap bitmap.
+ const bitsIsPowerOfTwo = bits&(bits-1) == 0
+ if bits > ptrBits || (!bitsIsPowerOfTwo && j+bits > ptrBits) {
+
+ bits0 := ptrBits - j
+ bits1 := bits - bits0
+ dst0 := (*uintptr)(add(dst, (i+0)*goarch.PtrSize))
+ dst1 := (*uintptr)(add(dst, (i+1)*goarch.PtrSize))
+ *dst0 = (*dst0)&(^uintptr(0)>>bits0) | (src << j)
+ *dst1 = (*dst1)&^((1<<bits1)-1) | (src >> bits0)
+ } else {
+
+ dst := (*uintptr)(add(dst, i*goarch.PtrSize))
+ *dst = (*dst)&^(((1<<(min(bits, ptrBits)))-1)<<j) | (src << j)
+ }
+
+ const doubleCheck = false
+ if doubleCheck {
+ writeHeapBitsDoubleCheck(span, x, dataSize, src, src0, i, j, bits, typ)
+ }
+ if doubleCheckHeapSetType {
+ doubleCheckHeapType(x, dataSize, typ, nil, span)
+ }
+ c.scanAlloc += scanSize
+ }
+
+ // Make the zeroing and heap-bit writes above visible before the new
+ // object can be observed by other goroutines or the GC.
+ publicationBarrier()
+
+ // With an active GC, mark the new object (gcmarknewobject); otherwise
+ // just publish the updated freeindex to the scanner.
+ if writeBarrier.enabled {
+
+ gcmarknewobject(span, uintptr(x))
+ } else {
+
+ span.freeIndexForScan = span.freeindex
+ }
+
+ // Memory-profile sampling check.
+ c.nextSample -= int64(elemsize)
+ if c.nextSample < 0 || MemProfileRate != c.memProfRate {
+ profilealloc(mp, x, elemsize)
+ }
+ mp.mallocing = 0
+ releasem(mp)
+
+ if checkGCTrigger {
+ if t := (gcTrigger{kind: gcTriggerHeap}); t.test() {
+ gcStart(t)
+ }
+ }
+ // Charge the size-class rounding waste (elemsize - size) to assist
+ // credit; deductAssistCredit above only charged the requested size.
+ if gcBlackenEnabled != 0 && elemsize != 0 {
+ if assistG := getg().m.curg; assistG != nil {
+ assistG.gcAssistBytes -= int64(elemsize - size)
+ }
+ }
+
+ if debug.malloc {
+ postMallocgcDebug(x, elemsize, typ)
+ }
+ return x
+}
+
+// mallocgcSmallScanNoHeaderSC25 allocates an object in size class 25
+// (480-byte slots) for a scannable (pointer-containing) type that does
+// not need a malloc header. Generated specialization of the generic
+// small-scan-no-header malloc path: sizeclass and elemsize are baked in
+// as constants, and the free-slot fast path and heap-bitmap write are
+// inlined. The needzero parameter is unused here; zeroing is driven by
+// span.needzero instead.
+func mallocgcSmallScanNoHeaderSC25(size uintptr, typ *_type, needzero bool) unsafe.Pointer {
+ if doubleCheckMalloc {
+ if gcphase == _GCmarktermination {
+ throw("mallocgc called with gcphase == _GCmarktermination")
+ }
+ }
+
+ // Allocation can queue a finalizer; tell the lock-rank checker up front.
+ lockRankMayQueueFinalizer()
+
+ if debug.malloc {
+ if x := preMallocgcDebug(size, typ); x != nil {
+ return x
+ }
+ }
+
+ // If the GC mark phase is active, charge this allocation against the
+ // goroutine's assist credit.
+ if gcBlackenEnabled != 0 {
+ deductAssistCredit(size)
+ }
+
+ const sizeclass = 25
+
+ const elemsize = 480
+
+ // Pin to the current M (disables preemption) while we use its mcache;
+ // released via releasem below.
+ mp := acquirem()
+ if doubleCheckMalloc {
+ doubleCheckSmallScanNoHeader(size, typ, mp)
+ }
+ mp.mallocing = 1
+
+ checkGCTrigger := false
+ c := getMCache(mp)
+ const spc = spanClass(sizeclass<<1) | spanClass(0)
+ span := c.alloc[spc]
+
+ // Inlined fast path (cf. nextFreeFast): pop the next free slot out of
+ // the span's 64-bit allocCache bitmap.
+ var nextFreeFastResult gclinkptr
+ if span.allocCache != 0 {
+ theBit := sys.TrailingZeros64(span.allocCache)
+ result := span.freeindex + uint16(theBit)
+ if result < span.nelems {
+ freeidx := result + 1
+ // Only take the fast path if it does not exhaust the current cache
+ // word mid-span (that case needs a cache refill in the slow path).
+ if !(freeidx%64 == 0 && freeidx != span.nelems) {
+ span.allocCache >>= uint(theBit + 1)
+ span.freeindex = freeidx
+ span.allocCount++
+ nextFreeFastResult = gclinkptr(uintptr(result)*
+ 480 +
+ span.base())
+ }
+ }
+ }
+ v := nextFreeFastResult
+ if v == 0 {
+ // Fast path failed: slow path through the mcache, which may refill
+ // the span and ask us to check the GC trigger afterwards.
+ v, span, checkGCTrigger = c.nextFree(spc)
+ }
+ x := unsafe.Pointer(v)
+ if span.needzero != 0 {
+ memclrNoHeapPointers(x, elemsize)
+ }
+ // sizeclass is 25 in this specialization, so the first branch is dead
+ // code kept by the generator; the else branch is an inlined
+ // writeHeapBitsSmall, building the type's pointer bitmap and storing
+ // it into the span's packed heap bits.
+ if goarch.PtrSize == 8 && sizeclass == 1 {
+
+ c.scanAlloc += 8
+ } else {
+ dataSize := size
+ x := uintptr(x)
+
+ if doubleCheckHeapSetType && (!heapBitsInSpan(dataSize) || !heapBitsInSpan(480)) {
+ throw("tried to write heap bits, but no heap bits in span")
+ }
+
+ src0 := readUintptr(getGCMask(typ))
+
+ const elemsize = 480
+
+ // Replicate the type's one-word GC mask across every element that
+ // fits in dataSize.
+ scanSize := typ.PtrBytes
+ src := src0
+ if typ.Size_ == goarch.PtrSize {
+ src = (1 << (dataSize / goarch.PtrSize)) - 1
+ } else {
+
+ if doubleCheckHeapSetType && !asanenabled && dataSize%typ.Size_ != 0 {
+ throw("runtime: (*mspan).writeHeapBitsSmall: dataSize is not a multiple of typ.Size_")
+ }
+ for i := typ.Size_; i < dataSize; i += typ.Size_ {
+ src |= src0 << (i / goarch.PtrSize)
+ scanSize += typ.Size_
+ }
+ }
+
+ dstBase, _ := spanHeapBitsRange(span.base(), pageSize, elemsize)
+ dst := unsafe.Pointer(dstBase)
+ o := (x - span.base()) / goarch.PtrSize
+ i := o / ptrBits
+ j := o % ptrBits
+ const bits uintptr = elemsize / goarch.PtrSize
+
+ // The object's bitmap may straddle two words of the heap bitmap.
+ const bitsIsPowerOfTwo = bits&(bits-1) == 0
+ if bits > ptrBits || (!bitsIsPowerOfTwo && j+bits > ptrBits) {
+
+ bits0 := ptrBits - j
+ bits1 := bits - bits0
+ dst0 := (*uintptr)(add(dst, (i+0)*goarch.PtrSize))
+ dst1 := (*uintptr)(add(dst, (i+1)*goarch.PtrSize))
+ *dst0 = (*dst0)&(^uintptr(0)>>bits0) | (src << j)
+ *dst1 = (*dst1)&^((1<<bits1)-1) | (src >> bits0)
+ } else {
+
+ dst := (*uintptr)(add(dst, i*goarch.PtrSize))
+ *dst = (*dst)&^(((1<<(min(bits, ptrBits)))-1)<<j) | (src << j)
+ }
+
+ const doubleCheck = false
+ if doubleCheck {
+ writeHeapBitsDoubleCheck(span, x, dataSize, src, src0, i, j, bits, typ)
+ }
+ if doubleCheckHeapSetType {
+ doubleCheckHeapType(x, dataSize, typ, nil, span)
+ }
+ c.scanAlloc += scanSize
+ }
+
+ // Make the zeroing and heap-bit writes above visible before the new
+ // object can be observed by other goroutines or the GC.
+ publicationBarrier()
+
+ // With an active GC, mark the new object (gcmarknewobject); otherwise
+ // just publish the updated freeindex to the scanner.
+ if writeBarrier.enabled {
+
+ gcmarknewobject(span, uintptr(x))
+ } else {
+
+ span.freeIndexForScan = span.freeindex
+ }
+
+ // Memory-profile sampling check.
+ c.nextSample -= int64(elemsize)
+ if c.nextSample < 0 || MemProfileRate != c.memProfRate {
+ profilealloc(mp, x, elemsize)
+ }
+ mp.mallocing = 0
+ releasem(mp)
+
+ if checkGCTrigger {
+ if t := (gcTrigger{kind: gcTriggerHeap}); t.test() {
+ gcStart(t)
+ }
+ }
+ // Charge the size-class rounding waste (elemsize - size) to assist
+ // credit; deductAssistCredit above only charged the requested size.
+ if gcBlackenEnabled != 0 && elemsize != 0 {
+ if assistG := getg().m.curg; assistG != nil {
+ assistG.gcAssistBytes -= int64(elemsize - size)
+ }
+ }
+
+ if debug.malloc {
+ postMallocgcDebug(x, elemsize, typ)
+ }
+ return x
+}
+
+// mallocgcSmallScanNoHeaderSC26 is a generated, size-class-specialized
+// allocation fast path: it allocates a pointer-bearing ("scan") object in
+// size class 26 (512-byte elements) whose heap bitmap is stored in the
+// span itself rather than in a malloc header. Code generated; do not edit.
+func mallocgcSmallScanNoHeaderSC26(size uintptr, typ *_type, needzero bool) unsafe.Pointer {
+	if doubleCheckMalloc {
+		if gcphase == _GCmarktermination {
+			throw("mallocgc called with gcphase == _GCmarktermination")
+		}
+	}
+
+	// Publishing a new object may queue a finalizer; inform the
+	// lock-rank checker up front.
+	lockRankMayQueueFinalizer()
+
+	if debug.malloc {
+		if x := preMallocgcDebug(size, typ); x != nil {
+			return x
+		}
+	}
+
+	// Charge the allocating goroutine for GC assist work while marking.
+	if gcBlackenEnabled != 0 {
+		deductAssistCredit(size)
+	}
+
+	const sizeclass = 26
+
+	const elemsize = 512
+
+	mp := acquirem()
+	if doubleCheckMalloc {
+		doubleCheckSmallScanNoHeader(size, typ, mp)
+	}
+	mp.mallocing = 1
+
+	checkGCTrigger := false
+	c := getMCache(mp)
+	// Span class for size class 26 with the noscan bit clear (scan objects).
+	const spc = spanClass(sizeclass<<1) | spanClass(0)
+	span := c.alloc[spc]
+
+	// Inlined nextFreeFast: pop the next free slot out of the span's
+	// 64-bit allocCache bitmap, if one is available.
+	var nextFreeFastResult gclinkptr
+	if span.allocCache != 0 {
+		theBit := sys.TrailingZeros64(span.allocCache)
+		result := span.freeindex + uint16(theBit)
+		if result < span.nelems {
+			freeidx := result + 1
+			if !(freeidx%64 == 0 && freeidx != span.nelems) {
+				span.allocCache >>= uint(theBit + 1)
+				span.freeindex = freeidx
+				span.allocCount++
+				nextFreeFastResult = gclinkptr(uintptr(result)*
+					512 +
+					span.base())
+			}
+		}
+	}
+	v := nextFreeFastResult
+	if v == 0 {
+		// allocCache exhausted: refill the cached span on the slow path.
+		// checkGCTrigger tells us the heap grew and a GC may be due.
+		v, span, checkGCTrigger = c.nextFree(spc)
+	}
+	x := unsafe.Pointer(v)
+	if span.needzero != 0 {
+		memclrNoHeapPointers(x, elemsize)
+	}
+	// Statically false for this specialization (sizeclass is 26, not 1);
+	// this branch is part of the shared generator template.
+	if goarch.PtrSize == 8 && sizeclass == 1 {
+
+		c.scanAlloc += 8
+	} else {
+		// Inlined heap-bitmap write (writeHeapBitsSmall equivalent):
+		// build the object's pointer bitmap from the type's GC mask and
+		// store it into the span's inline heap bits.
+		dataSize := size
+		x := uintptr(x)
+
+		if doubleCheckHeapSetType && (!heapBitsInSpan(dataSize) || !heapBitsInSpan(512)) {
+			throw("tried to write heap bits, but no heap bits in span")
+		}
+
+		// One-word pointer mask for a single element of typ.
+		src0 := readUintptr(getGCMask(typ))
+
+		const elemsize = 512
+
+		scanSize := typ.PtrBytes
+		src := src0
+		if typ.Size_ == goarch.PtrSize {
+			// Pointer-sized element: every word is a pointer.
+			src = (1 << (dataSize / goarch.PtrSize)) - 1
+		} else {
+
+			if doubleCheckHeapSetType && !asanenabled && dataSize%typ.Size_ != 0 {
+				throw("runtime: (*mspan).writeHeapBitsSmall: dataSize is not a multiple of typ.Size_")
+			}
+			// Replicate the single-element mask across every element
+			// stored in this allocation.
+			for i := typ.Size_; i < dataSize; i += typ.Size_ {
+				src |= src0 << (i / goarch.PtrSize)
+				scanSize += typ.Size_
+			}
+		}
+
+		// Locate the object's bit positions within the span's heap bits:
+		// word index i, bit offset j.
+		dstBase, _ := spanHeapBitsRange(span.base(), pageSize, elemsize)
+		dst := unsafe.Pointer(dstBase)
+		o := (x - span.base()) / goarch.PtrSize
+		i := o / ptrBits
+		j := o % ptrBits
+		const bits uintptr = elemsize / goarch.PtrSize
+
+		const bitsIsPowerOfTwo = bits&(bits-1) == 0
+		if bits > ptrBits || (!bitsIsPowerOfTwo && j+bits > ptrBits) {
+			// The object's bitmap straddles a word boundary; merge the
+			// low bits into word i and the high bits into word i+1.
+			bits0 := ptrBits - j
+			bits1 := bits - bits0
+			dst0 := (*uintptr)(add(dst, (i+0)*goarch.PtrSize))
+			dst1 := (*uintptr)(add(dst, (i+1)*goarch.PtrSize))
+			*dst0 = (*dst0)&(^uintptr(0)>>bits0) | (src << j)
+			*dst1 = (*dst1)&^((1<<bits1)-1) | (src >> bits0)
+		} else {
+			// Bitmap fits within a single word; mask out the old bits
+			// and merge in the new ones.
+			dst := (*uintptr)(add(dst, i*goarch.PtrSize))
+			*dst = (*dst)&^(((1<<(min(bits, ptrBits)))-1)<<j) | (src << j)
+		}
+
+		const doubleCheck = false
+		if doubleCheck {
+			writeHeapBitsDoubleCheck(span, x, dataSize, src, src0, i, j, bits, typ)
+		}
+		if doubleCheckHeapSetType {
+			doubleCheckHeapType(x, dataSize, typ, nil, span)
+		}
+		c.scanAlloc += scanSize
+	}
+
+	// Make initialization (zeroing + heap bits) visible before the
+	// pointer to the object is published.
+	publicationBarrier()
+
+	if writeBarrier.enabled {
+		// Allocate-black: mark the new object during the GC mark phase.
+		gcmarknewobject(span, uintptr(x))
+	} else {
+
+		span.freeIndexForScan = span.freeindex
+	}
+
+	// Heap-profiling sample accounting.
+	c.nextSample -= int64(elemsize)
+	if c.nextSample < 0 || MemProfileRate != c.memProfRate {
+		profilealloc(mp, x, elemsize)
+	}
+	mp.mallocing = 0
+	releasem(mp)
+
+	if checkGCTrigger {
+		if t := (gcTrigger{kind: gcTriggerHeap}); t.test() {
+			gcStart(t)
+		}
+	}
+	// Charge assist credit for the rounding waste (elemsize - size).
+	if gcBlackenEnabled != 0 && elemsize != 0 {
+		if assistG := getg().m.curg; assistG != nil {
+			assistG.gcAssistBytes -= int64(elemsize - size)
+		}
+	}
+
+	if debug.malloc {
+		postMallocgcDebug(x, elemsize, typ)
+	}
+	return x
+}
+
+// mallocTiny1 is a generated, size-specialized tiny-allocator fast path:
+// it allocates a 1-byte pointer-free object, packing it into the mcache's
+// current 16-byte tiny block when it fits, otherwise carving a fresh
+// 16-byte element out of the tiny span class. Code generated; do not edit.
+func mallocTiny1(size uintptr, typ *_type, needzero bool) unsafe.Pointer {
+	if doubleCheckMalloc {
+		if gcphase == _GCmarktermination {
+			throw("mallocgc called with gcphase == _GCmarktermination")
+		}
+	}
+
+	lockRankMayQueueFinalizer()
+
+	if debug.malloc {
+		if x := preMallocgcDebug(size, typ); x != nil {
+			return x
+		}
+	}
+
+	// Charge the allocating goroutine for GC assist work while marking.
+	if gcBlackenEnabled != 0 {
+		deductAssistCredit(size)
+	}
+
+	// The requested size, as a compile-time constant for this specialization.
+	const constsize = 1
+
+	// Tiny objects share 16-byte blocks.
+	const elemsize = 16
+
+	mp := acquirem()
+	if doubleCheckMalloc {
+		doubleCheckTiny(constsize, typ, mp)
+	}
+	mp.mallocing = 1
+
+	c := getMCache(mp)
+	off := c.tinyoffset
+
+	// Align within the current tiny block to the natural alignment implied
+	// by the constant size. (Several arms are statically dead for
+	// constsize == 1; they belong to the generator's shared template.)
+	if constsize&7 == 0 {
+		off = alignUp(off, 8)
+	} else if goarch.PtrSize == 4 && constsize == 12 {
+
+		off = alignUp(off, 8)
+	} else if constsize&3 == 0 {
+		off = alignUp(off, 4)
+	} else if constsize&1 == 0 {
+		off = alignUp(off, 2)
+	}
+	if off+constsize <= maxTinySize && c.tiny != 0 {
+		// Fast path: the object fits in the current tiny block.
+		x := unsafe.Pointer(c.tiny + off)
+		c.tinyoffset = off + constsize
+		c.tinyAllocs++
+		mp.mallocing = 0
+		releasem(mp)
+		// No new heap memory was obtained; account this allocation as
+		// zero additional bytes.
+		const elemsize = 0
+		{
+
+			if gcBlackenEnabled != 0 && elemsize != 0 {
+				if assistG := getg().m.curg; assistG != nil {
+					assistG.gcAssistBytes -= int64(elemsize - size)
+				}
+			}
+
+			if debug.malloc {
+				postMallocgcDebug(x, elemsize, typ)
+			}
+			return x
+		}
+
+	}
+
+	// Slow path: allocate a fresh 16-byte block from the tiny span class.
+	checkGCTrigger := false
+	span := c.alloc[tinySpanClass]
+
+	const nbytes = 8192
+	const nelems = uint16((nbytes - unsafe.Sizeof(spanInlineMarkBits{})) /
+		16,
+	)
+	// Inlined nextFreeFast: pop the next free slot out of the span's
+	// 64-bit allocCache bitmap, if one is available.
+	var nextFreeFastResult gclinkptr
+	if span.allocCache != 0 {
+		theBit := sys.TrailingZeros64(span.allocCache)
+		result := span.freeindex + uint16(theBit)
+		if result < nelems {
+			freeidx := result + 1
+			if !(freeidx%64 == 0 && freeidx != nelems) {
+				span.allocCache >>= uint(theBit + 1)
+				span.freeindex = freeidx
+				span.allocCount++
+				nextFreeFastResult = gclinkptr(uintptr(result)*
+					16 +
+					span.base())
+			}
+		}
+	}
+	v := nextFreeFastResult
+	if v == 0 {
+		// allocCache exhausted: refill the cached span on the slow path.
+		v, span, checkGCTrigger = c.nextFree(tinySpanClass)
+	}
+	x := unsafe.Pointer(v)
+	// Zero the whole 16-byte block.
+	(*[2]uint64)(x)[0] = 0
+	(*[2]uint64)(x)[1] = 0
+
+	if !raceenabled && (constsize < c.tinyoffset || c.tiny == 0) {
+		// Adopt the new block as the tiny block: it has more free space
+		// remaining than the old one (or there was no tiny block). Tiny
+		// block sharing is disabled under the race detector.
+		c.tiny = uintptr(x)
+		c.tinyoffset = constsize
+	}
+
+	// Make the zeroed contents visible before the pointer is published.
+	publicationBarrier()
+
+	if writeBarrier.enabled {
+		// Allocate-black: mark the new object during the GC mark phase.
+		gcmarknewobject(span, uintptr(x))
+	} else {
+
+		span.freeIndexForScan = span.freeindex
+	}
+
+	// Heap-profiling sample accounting.
+	c.nextSample -= int64(elemsize)
+	if c.nextSample < 0 || MemProfileRate != c.memProfRate {
+		profilealloc(mp, x, elemsize)
+	}
+	mp.mallocing = 0
+	releasem(mp)
+
+	if checkGCTrigger {
+		if t := (gcTrigger{kind: gcTriggerHeap}); t.test() {
+			gcStart(t)
+		}
+	}
+
+	if raceenabled {
+		// Race mode: return an end-aligned pointer so each tiny
+		// allocation in the block has a distinct address.
+		x = add(x, elemsize-constsize)
+	}
+	if gcBlackenEnabled != 0 && elemsize != 0 {
+		if assistG := getg().m.curg; assistG != nil {
+			assistG.gcAssistBytes -= int64(elemsize - size)
+		}
+	}
+
+	if debug.malloc {
+		postMallocgcDebug(x, elemsize, typ)
+	}
+	return x
+}
+
+// mallocTiny2 is a generated, size-specialized tiny-allocator fast path:
+// it allocates a 2-byte pointer-free object, packing it into the mcache's
+// current 16-byte tiny block when it fits, otherwise carving a fresh
+// 16-byte element out of the tiny span class. Code generated; do not edit.
+func mallocTiny2(size uintptr, typ *_type, needzero bool) unsafe.Pointer {
+	if doubleCheckMalloc {
+		if gcphase == _GCmarktermination {
+			throw("mallocgc called with gcphase == _GCmarktermination")
+		}
+	}
+
+	lockRankMayQueueFinalizer()
+
+	if debug.malloc {
+		if x := preMallocgcDebug(size, typ); x != nil {
+			return x
+		}
+	}
+
+	// Charge the allocating goroutine for GC assist work while marking.
+	if gcBlackenEnabled != 0 {
+		deductAssistCredit(size)
+	}
+
+	// The requested size, as a compile-time constant for this specialization.
+	const constsize = 2
+
+	// Tiny objects share 16-byte blocks.
+	const elemsize = 16
+
+	mp := acquirem()
+	if doubleCheckMalloc {
+		doubleCheckTiny(constsize, typ, mp)
+	}
+	mp.mallocing = 1
+
+	c := getMCache(mp)
+	off := c.tinyoffset
+
+	// Align within the current tiny block to the natural alignment implied
+	// by the constant size (2-byte alignment for constsize == 2).
+	if constsize&7 == 0 {
+		off = alignUp(off, 8)
+	} else if goarch.PtrSize == 4 && constsize == 12 {
+
+		off = alignUp(off, 8)
+	} else if constsize&3 == 0 {
+		off = alignUp(off, 4)
+	} else if constsize&1 == 0 {
+		off = alignUp(off, 2)
+	}
+	if off+constsize <= maxTinySize && c.tiny != 0 {
+		// Fast path: the object fits in the current tiny block.
+		x := unsafe.Pointer(c.tiny + off)
+		c.tinyoffset = off + constsize
+		c.tinyAllocs++
+		mp.mallocing = 0
+		releasem(mp)
+		// No new heap memory was obtained; account this allocation as
+		// zero additional bytes.
+		const elemsize = 0
+		{
+
+			if gcBlackenEnabled != 0 && elemsize != 0 {
+				if assistG := getg().m.curg; assistG != nil {
+					assistG.gcAssistBytes -= int64(elemsize - size)
+				}
+			}
+
+			if debug.malloc {
+				postMallocgcDebug(x, elemsize, typ)
+			}
+			return x
+		}
+
+	}
+
+	// Slow path: allocate a fresh 16-byte block from the tiny span class.
+	checkGCTrigger := false
+	span := c.alloc[tinySpanClass]
+
+	const nbytes = 8192
+	const nelems = uint16((nbytes - unsafe.Sizeof(spanInlineMarkBits{})) /
+		16,
+	)
+	// Inlined nextFreeFast: pop the next free slot out of the span's
+	// 64-bit allocCache bitmap, if one is available.
+	var nextFreeFastResult gclinkptr
+	if span.allocCache != 0 {
+		theBit := sys.TrailingZeros64(span.allocCache)
+		result := span.freeindex + uint16(theBit)
+		if result < nelems {
+			freeidx := result + 1
+			if !(freeidx%64 == 0 && freeidx != nelems) {
+				span.allocCache >>= uint(theBit + 1)
+				span.freeindex = freeidx
+				span.allocCount++
+				nextFreeFastResult = gclinkptr(uintptr(result)*
+					16 +
+					span.base())
+			}
+		}
+	}
+	v := nextFreeFastResult
+	if v == 0 {
+		// allocCache exhausted: refill the cached span on the slow path.
+		v, span, checkGCTrigger = c.nextFree(tinySpanClass)
+	}
+	x := unsafe.Pointer(v)
+	// Zero the whole 16-byte block.
+	(*[2]uint64)(x)[0] = 0
+	(*[2]uint64)(x)[1] = 0
+
+	if !raceenabled && (constsize < c.tinyoffset || c.tiny == 0) {
+		// Adopt the new block as the tiny block: it has more free space
+		// remaining than the old one (or there was no tiny block). Tiny
+		// block sharing is disabled under the race detector.
+		c.tiny = uintptr(x)
+		c.tinyoffset = constsize
+	}
+
+	// Make the zeroed contents visible before the pointer is published.
+	publicationBarrier()
+
+	if writeBarrier.enabled {
+		// Allocate-black: mark the new object during the GC mark phase.
+		gcmarknewobject(span, uintptr(x))
+	} else {
+
+		span.freeIndexForScan = span.freeindex
+	}
+
+	// Heap-profiling sample accounting.
+	c.nextSample -= int64(elemsize)
+	if c.nextSample < 0 || MemProfileRate != c.memProfRate {
+		profilealloc(mp, x, elemsize)
+	}
+	mp.mallocing = 0
+	releasem(mp)
+
+	if checkGCTrigger {
+		if t := (gcTrigger{kind: gcTriggerHeap}); t.test() {
+			gcStart(t)
+		}
+	}
+
+	if raceenabled {
+		// Race mode: return an end-aligned pointer so each tiny
+		// allocation in the block has a distinct address.
+		x = add(x, elemsize-constsize)
+	}
+	if gcBlackenEnabled != 0 && elemsize != 0 {
+		if assistG := getg().m.curg; assistG != nil {
+			assistG.gcAssistBytes -= int64(elemsize - size)
+		}
+	}
+
+	if debug.malloc {
+		postMallocgcDebug(x, elemsize, typ)
+	}
+	return x
+}
+
+// mallocTiny3 is a generated, size-specialized tiny-allocator fast path:
+// it allocates a 3-byte pointer-free object, packing it into the mcache's
+// current 16-byte tiny block when it fits, otherwise carving a fresh
+// 16-byte element out of the tiny span class. Code generated; do not edit.
+func mallocTiny3(size uintptr, typ *_type, needzero bool) unsafe.Pointer {
+	if doubleCheckMalloc {
+		if gcphase == _GCmarktermination {
+			throw("mallocgc called with gcphase == _GCmarktermination")
+		}
+	}
+
+	lockRankMayQueueFinalizer()
+
+	if debug.malloc {
+		if x := preMallocgcDebug(size, typ); x != nil {
+			return x
+		}
+	}
+
+	// Charge the allocating goroutine for GC assist work while marking.
+	if gcBlackenEnabled != 0 {
+		deductAssistCredit(size)
+	}
+
+	// The requested size, as a compile-time constant for this specialization.
+	const constsize = 3
+
+	// Tiny objects share 16-byte blocks.
+	const elemsize = 16
+
+	mp := acquirem()
+	if doubleCheckMalloc {
+		doubleCheckTiny(constsize, typ, mp)
+	}
+	mp.mallocing = 1
+
+	c := getMCache(mp)
+	off := c.tinyoffset
+
+	// Align within the current tiny block to the natural alignment implied
+	// by the constant size (no alignment needed for odd constsize == 3).
+	if constsize&7 == 0 {
+		off = alignUp(off, 8)
+	} else if goarch.PtrSize == 4 && constsize == 12 {
+
+		off = alignUp(off, 8)
+	} else if constsize&3 == 0 {
+		off = alignUp(off, 4)
+	} else if constsize&1 == 0 {
+		off = alignUp(off, 2)
+	}
+	if off+constsize <= maxTinySize && c.tiny != 0 {
+		// Fast path: the object fits in the current tiny block.
+		x := unsafe.Pointer(c.tiny + off)
+		c.tinyoffset = off + constsize
+		c.tinyAllocs++
+		mp.mallocing = 0
+		releasem(mp)
+		// No new heap memory was obtained; account this allocation as
+		// zero additional bytes.
+		const elemsize = 0
+		{
+
+			if gcBlackenEnabled != 0 && elemsize != 0 {
+				if assistG := getg().m.curg; assistG != nil {
+					assistG.gcAssistBytes -= int64(elemsize - size)
+				}
+			}
+
+			if debug.malloc {
+				postMallocgcDebug(x, elemsize, typ)
+			}
+			return x
+		}
+
+	}
+
+	// Slow path: allocate a fresh 16-byte block from the tiny span class.
+	checkGCTrigger := false
+	span := c.alloc[tinySpanClass]
+
+	const nbytes = 8192
+	const nelems = uint16((nbytes - unsafe.Sizeof(spanInlineMarkBits{})) /
+		16,
+	)
+	// Inlined nextFreeFast: pop the next free slot out of the span's
+	// 64-bit allocCache bitmap, if one is available.
+	var nextFreeFastResult gclinkptr
+	if span.allocCache != 0 {
+		theBit := sys.TrailingZeros64(span.allocCache)
+		result := span.freeindex + uint16(theBit)
+		if result < nelems {
+			freeidx := result + 1
+			if !(freeidx%64 == 0 && freeidx != nelems) {
+				span.allocCache >>= uint(theBit + 1)
+				span.freeindex = freeidx
+				span.allocCount++
+				nextFreeFastResult = gclinkptr(uintptr(result)*
+					16 +
+					span.base())
+			}
+		}
+	}
+	v := nextFreeFastResult
+	if v == 0 {
+		// allocCache exhausted: refill the cached span on the slow path.
+		v, span, checkGCTrigger = c.nextFree(tinySpanClass)
+	}
+	x := unsafe.Pointer(v)
+	// Zero the whole 16-byte block.
+	(*[2]uint64)(x)[0] = 0
+	(*[2]uint64)(x)[1] = 0
+
+	if !raceenabled && (constsize < c.tinyoffset || c.tiny == 0) {
+		// Adopt the new block as the tiny block: it has more free space
+		// remaining than the old one (or there was no tiny block). Tiny
+		// block sharing is disabled under the race detector.
+		c.tiny = uintptr(x)
+		c.tinyoffset = constsize
+	}
+
+	// Make the zeroed contents visible before the pointer is published.
+	publicationBarrier()
+
+	if writeBarrier.enabled {
+		// Allocate-black: mark the new object during the GC mark phase.
+		gcmarknewobject(span, uintptr(x))
+	} else {
+
+		span.freeIndexForScan = span.freeindex
+	}
+
+	// Heap-profiling sample accounting.
+	c.nextSample -= int64(elemsize)
+	if c.nextSample < 0 || MemProfileRate != c.memProfRate {
+		profilealloc(mp, x, elemsize)
+	}
+	mp.mallocing = 0
+	releasem(mp)
+
+	if checkGCTrigger {
+		if t := (gcTrigger{kind: gcTriggerHeap}); t.test() {
+			gcStart(t)
+		}
+	}
+
+	if raceenabled {
+		// Race mode: return an end-aligned pointer so each tiny
+		// allocation in the block has a distinct address.
+		x = add(x, elemsize-constsize)
+	}
+	if gcBlackenEnabled != 0 && elemsize != 0 {
+		if assistG := getg().m.curg; assistG != nil {
+			assistG.gcAssistBytes -= int64(elemsize - size)
+		}
+	}
+
+	if debug.malloc {
+		postMallocgcDebug(x, elemsize, typ)
+	}
+	return x
+}
+
+// mallocTiny4 is a generated, size-specialized tiny-allocator fast path:
+// it allocates a 4-byte pointer-free object, packing it into the mcache's
+// current 16-byte tiny block when it fits, otherwise carving a fresh
+// 16-byte element out of the tiny span class. Code generated; do not edit.
+func mallocTiny4(size uintptr, typ *_type, needzero bool) unsafe.Pointer {
+	if doubleCheckMalloc {
+		if gcphase == _GCmarktermination {
+			throw("mallocgc called with gcphase == _GCmarktermination")
+		}
+	}
+
+	lockRankMayQueueFinalizer()
+
+	if debug.malloc {
+		if x := preMallocgcDebug(size, typ); x != nil {
+			return x
+		}
+	}
+
+	// Charge the allocating goroutine for GC assist work while marking.
+	if gcBlackenEnabled != 0 {
+		deductAssistCredit(size)
+	}
+
+	// The requested size, as a compile-time constant for this specialization.
+	const constsize = 4
+
+	// Tiny objects share 16-byte blocks.
+	const elemsize = 16
+
+	mp := acquirem()
+	if doubleCheckMalloc {
+		doubleCheckTiny(constsize, typ, mp)
+	}
+	mp.mallocing = 1
+
+	c := getMCache(mp)
+	off := c.tinyoffset
+
+	// Align within the current tiny block to the natural alignment implied
+	// by the constant size (4-byte alignment for constsize == 4).
+	if constsize&7 == 0 {
+		off = alignUp(off, 8)
+	} else if goarch.PtrSize == 4 && constsize == 12 {
+
+		off = alignUp(off, 8)
+	} else if constsize&3 == 0 {
+		off = alignUp(off, 4)
+	} else if constsize&1 == 0 {
+		off = alignUp(off, 2)
+	}
+	if off+constsize <= maxTinySize && c.tiny != 0 {
+		// Fast path: the object fits in the current tiny block.
+		x := unsafe.Pointer(c.tiny + off)
+		c.tinyoffset = off + constsize
+		c.tinyAllocs++
+		mp.mallocing = 0
+		releasem(mp)
+		// No new heap memory was obtained; account this allocation as
+		// zero additional bytes.
+		const elemsize = 0
+		{
+
+			if gcBlackenEnabled != 0 && elemsize != 0 {
+				if assistG := getg().m.curg; assistG != nil {
+					assistG.gcAssistBytes -= int64(elemsize - size)
+				}
+			}
+
+			if debug.malloc {
+				postMallocgcDebug(x, elemsize, typ)
+			}
+			return x
+		}
+
+	}
+
+	// Slow path: allocate a fresh 16-byte block from the tiny span class.
+	checkGCTrigger := false
+	span := c.alloc[tinySpanClass]
+
+	const nbytes = 8192
+	const nelems = uint16((nbytes - unsafe.Sizeof(spanInlineMarkBits{})) /
+		16,
+	)
+	// Inlined nextFreeFast: pop the next free slot out of the span's
+	// 64-bit allocCache bitmap, if one is available.
+	var nextFreeFastResult gclinkptr
+	if span.allocCache != 0 {
+		theBit := sys.TrailingZeros64(span.allocCache)
+		result := span.freeindex + uint16(theBit)
+		if result < nelems {
+			freeidx := result + 1
+			if !(freeidx%64 == 0 && freeidx != nelems) {
+				span.allocCache >>= uint(theBit + 1)
+				span.freeindex = freeidx
+				span.allocCount++
+				nextFreeFastResult = gclinkptr(uintptr(result)*
+					16 +
+					span.base())
+			}
+		}
+	}
+	v := nextFreeFastResult
+	if v == 0 {
+		// allocCache exhausted: refill the cached span on the slow path.
+		v, span, checkGCTrigger = c.nextFree(tinySpanClass)
+	}
+	x := unsafe.Pointer(v)
+	// Zero the whole 16-byte block.
+	(*[2]uint64)(x)[0] = 0
+	(*[2]uint64)(x)[1] = 0
+
+	if !raceenabled && (constsize < c.tinyoffset || c.tiny == 0) {
+		// Adopt the new block as the tiny block: it has more free space
+		// remaining than the old one (or there was no tiny block). Tiny
+		// block sharing is disabled under the race detector.
+		c.tiny = uintptr(x)
+		c.tinyoffset = constsize
+	}
+
+	// Make the zeroed contents visible before the pointer is published.
+	publicationBarrier()
+
+	if writeBarrier.enabled {
+		// Allocate-black: mark the new object during the GC mark phase.
+		gcmarknewobject(span, uintptr(x))
+	} else {
+
+		span.freeIndexForScan = span.freeindex
+	}
+
+	// Heap-profiling sample accounting.
+	c.nextSample -= int64(elemsize)
+	if c.nextSample < 0 || MemProfileRate != c.memProfRate {
+		profilealloc(mp, x, elemsize)
+	}
+	mp.mallocing = 0
+	releasem(mp)
+
+	if checkGCTrigger {
+		if t := (gcTrigger{kind: gcTriggerHeap}); t.test() {
+			gcStart(t)
+		}
+	}
+
+	if raceenabled {
+		// Race mode: return an end-aligned pointer so each tiny
+		// allocation in the block has a distinct address.
+		x = add(x, elemsize-constsize)
+	}
+	if gcBlackenEnabled != 0 && elemsize != 0 {
+		if assistG := getg().m.curg; assistG != nil {
+			assistG.gcAssistBytes -= int64(elemsize - size)
+		}
+	}
+
+	if debug.malloc {
+		postMallocgcDebug(x, elemsize, typ)
+	}
+	return x
+}
+
+// mallocTiny5 is a generated, size-specialized tiny-allocator fast path:
+// it allocates a 5-byte pointer-free object, packing it into the mcache's
+// current 16-byte tiny block when it fits, otherwise carving a fresh
+// 16-byte element out of the tiny span class. Code generated; do not edit.
+func mallocTiny5(size uintptr, typ *_type, needzero bool) unsafe.Pointer {
+	if doubleCheckMalloc {
+		if gcphase == _GCmarktermination {
+			throw("mallocgc called with gcphase == _GCmarktermination")
+		}
+	}
+
+	lockRankMayQueueFinalizer()
+
+	if debug.malloc {
+		if x := preMallocgcDebug(size, typ); x != nil {
+			return x
+		}
+	}
+
+	// Charge the allocating goroutine for GC assist work while marking.
+	if gcBlackenEnabled != 0 {
+		deductAssistCredit(size)
+	}
+
+	// The requested size, as a compile-time constant for this specialization.
+	const constsize = 5
+
+	// Tiny objects share 16-byte blocks.
+	const elemsize = 16
+
+	mp := acquirem()
+	if doubleCheckMalloc {
+		doubleCheckTiny(constsize, typ, mp)
+	}
+	mp.mallocing = 1
+
+	c := getMCache(mp)
+	off := c.tinyoffset
+
+	// Align within the current tiny block to the natural alignment implied
+	// by the constant size (no alignment needed for odd constsize == 5).
+	if constsize&7 == 0 {
+		off = alignUp(off, 8)
+	} else if goarch.PtrSize == 4 && constsize == 12 {
+
+		off = alignUp(off, 8)
+	} else if constsize&3 == 0 {
+		off = alignUp(off, 4)
+	} else if constsize&1 == 0 {
+		off = alignUp(off, 2)
+	}
+	if off+constsize <= maxTinySize && c.tiny != 0 {
+		// Fast path: the object fits in the current tiny block.
+		x := unsafe.Pointer(c.tiny + off)
+		c.tinyoffset = off + constsize
+		c.tinyAllocs++
+		mp.mallocing = 0
+		releasem(mp)
+		// No new heap memory was obtained; account this allocation as
+		// zero additional bytes.
+		const elemsize = 0
+		{
+
+			if gcBlackenEnabled != 0 && elemsize != 0 {
+				if assistG := getg().m.curg; assistG != nil {
+					assistG.gcAssistBytes -= int64(elemsize - size)
+				}
+			}
+
+			if debug.malloc {
+				postMallocgcDebug(x, elemsize, typ)
+			}
+			return x
+		}
+
+	}
+
+	// Slow path: allocate a fresh 16-byte block from the tiny span class.
+	checkGCTrigger := false
+	span := c.alloc[tinySpanClass]
+
+	const nbytes = 8192
+	const nelems = uint16((nbytes - unsafe.Sizeof(spanInlineMarkBits{})) /
+		16,
+	)
+	// Inlined nextFreeFast: pop the next free slot out of the span's
+	// 64-bit allocCache bitmap, if one is available.
+	var nextFreeFastResult gclinkptr
+	if span.allocCache != 0 {
+		theBit := sys.TrailingZeros64(span.allocCache)
+		result := span.freeindex + uint16(theBit)
+		if result < nelems {
+			freeidx := result + 1
+			if !(freeidx%64 == 0 && freeidx != nelems) {
+				span.allocCache >>= uint(theBit + 1)
+				span.freeindex = freeidx
+				span.allocCount++
+				nextFreeFastResult = gclinkptr(uintptr(result)*
+					16 +
+					span.base())
+			}
+		}
+	}
+	v := nextFreeFastResult
+	if v == 0 {
+		// allocCache exhausted: refill the cached span on the slow path.
+		v, span, checkGCTrigger = c.nextFree(tinySpanClass)
+	}
+	x := unsafe.Pointer(v)
+	// Zero the whole 16-byte block.
+	(*[2]uint64)(x)[0] = 0
+	(*[2]uint64)(x)[1] = 0
+
+	if !raceenabled && (constsize < c.tinyoffset || c.tiny == 0) {
+		// Adopt the new block as the tiny block: it has more free space
+		// remaining than the old one (or there was no tiny block). Tiny
+		// block sharing is disabled under the race detector.
+		c.tiny = uintptr(x)
+		c.tinyoffset = constsize
+	}
+
+	// Make the zeroed contents visible before the pointer is published.
+	publicationBarrier()
+
+	if writeBarrier.enabled {
+		// Allocate-black: mark the new object during the GC mark phase.
+		gcmarknewobject(span, uintptr(x))
+	} else {
+
+		span.freeIndexForScan = span.freeindex
+	}
+
+	// Heap-profiling sample accounting.
+	c.nextSample -= int64(elemsize)
+	if c.nextSample < 0 || MemProfileRate != c.memProfRate {
+		profilealloc(mp, x, elemsize)
+	}
+	mp.mallocing = 0
+	releasem(mp)
+
+	if checkGCTrigger {
+		if t := (gcTrigger{kind: gcTriggerHeap}); t.test() {
+			gcStart(t)
+		}
+	}
+
+	if raceenabled {
+		// Race mode: return an end-aligned pointer so each tiny
+		// allocation in the block has a distinct address.
+		x = add(x, elemsize-constsize)
+	}
+	if gcBlackenEnabled != 0 && elemsize != 0 {
+		if assistG := getg().m.curg; assistG != nil {
+			assistG.gcAssistBytes -= int64(elemsize - size)
+		}
+	}
+
+	if debug.malloc {
+		postMallocgcDebug(x, elemsize, typ)
+	}
+	return x
+}
+
+// mallocTiny6 is a generated, size-specialized tiny-allocator fast path:
+// it allocates a 6-byte pointer-free object, packing it into the mcache's
+// current 16-byte tiny block when it fits, otherwise carving a fresh
+// 16-byte element out of the tiny span class. Code generated; do not edit.
+func mallocTiny6(size uintptr, typ *_type, needzero bool) unsafe.Pointer {
+	if doubleCheckMalloc {
+		if gcphase == _GCmarktermination {
+			throw("mallocgc called with gcphase == _GCmarktermination")
+		}
+	}
+
+	lockRankMayQueueFinalizer()
+
+	if debug.malloc {
+		if x := preMallocgcDebug(size, typ); x != nil {
+			return x
+		}
+	}
+
+	// Charge the allocating goroutine for GC assist work while marking.
+	if gcBlackenEnabled != 0 {
+		deductAssistCredit(size)
+	}
+
+	// The requested size, as a compile-time constant for this specialization.
+	const constsize = 6
+
+	// Tiny objects share 16-byte blocks.
+	const elemsize = 16
+
+	mp := acquirem()
+	if doubleCheckMalloc {
+		doubleCheckTiny(constsize, typ, mp)
+	}
+	mp.mallocing = 1
+
+	c := getMCache(mp)
+	off := c.tinyoffset
+
+	// Align within the current tiny block to the natural alignment implied
+	// by the constant size (2-byte alignment for constsize == 6).
+	if constsize&7 == 0 {
+		off = alignUp(off, 8)
+	} else if goarch.PtrSize == 4 && constsize == 12 {
+
+		off = alignUp(off, 8)
+	} else if constsize&3 == 0 {
+		off = alignUp(off, 4)
+	} else if constsize&1 == 0 {
+		off = alignUp(off, 2)
+	}
+	if off+constsize <= maxTinySize && c.tiny != 0 {
+		// Fast path: the object fits in the current tiny block.
+		x := unsafe.Pointer(c.tiny + off)
+		c.tinyoffset = off + constsize
+		c.tinyAllocs++
+		mp.mallocing = 0
+		releasem(mp)
+		// No new heap memory was obtained; account this allocation as
+		// zero additional bytes.
+		const elemsize = 0
+		{
+
+			if gcBlackenEnabled != 0 && elemsize != 0 {
+				if assistG := getg().m.curg; assistG != nil {
+					assistG.gcAssistBytes -= int64(elemsize - size)
+				}
+			}
+
+			if debug.malloc {
+				postMallocgcDebug(x, elemsize, typ)
+			}
+			return x
+		}
+
+	}
+
+	// Slow path: allocate a fresh 16-byte block from the tiny span class.
+	checkGCTrigger := false
+	span := c.alloc[tinySpanClass]
+
+	const nbytes = 8192
+	const nelems = uint16((nbytes - unsafe.Sizeof(spanInlineMarkBits{})) /
+		16,
+	)
+	// Inlined nextFreeFast: pop the next free slot out of the span's
+	// 64-bit allocCache bitmap, if one is available.
+	var nextFreeFastResult gclinkptr
+	if span.allocCache != 0 {
+		theBit := sys.TrailingZeros64(span.allocCache)
+		result := span.freeindex + uint16(theBit)
+		if result < nelems {
+			freeidx := result + 1
+			if !(freeidx%64 == 0 && freeidx != nelems) {
+				span.allocCache >>= uint(theBit + 1)
+				span.freeindex = freeidx
+				span.allocCount++
+				nextFreeFastResult = gclinkptr(uintptr(result)*
+					16 +
+					span.base())
+			}
+		}
+	}
+	v := nextFreeFastResult
+	if v == 0 {
+		// allocCache exhausted: refill the cached span on the slow path.
+		v, span, checkGCTrigger = c.nextFree(tinySpanClass)
+	}
+	x := unsafe.Pointer(v)
+	// Zero the whole 16-byte block.
+	(*[2]uint64)(x)[0] = 0
+	(*[2]uint64)(x)[1] = 0
+
+	if !raceenabled && (constsize < c.tinyoffset || c.tiny == 0) {
+		// Adopt the new block as the tiny block: it has more free space
+		// remaining than the old one (or there was no tiny block). Tiny
+		// block sharing is disabled under the race detector.
+		c.tiny = uintptr(x)
+		c.tinyoffset = constsize
+	}
+
+	// Make the zeroed contents visible before the pointer is published.
+	publicationBarrier()
+
+	if writeBarrier.enabled {
+		// Allocate-black: mark the new object during the GC mark phase.
+		gcmarknewobject(span, uintptr(x))
+	} else {
+
+		span.freeIndexForScan = span.freeindex
+	}
+
+	// Heap-profiling sample accounting.
+	c.nextSample -= int64(elemsize)
+	if c.nextSample < 0 || MemProfileRate != c.memProfRate {
+		profilealloc(mp, x, elemsize)
+	}
+	mp.mallocing = 0
+	releasem(mp)
+
+	if checkGCTrigger {
+		if t := (gcTrigger{kind: gcTriggerHeap}); t.test() {
+			gcStart(t)
+		}
+	}
+
+	if raceenabled {
+		// Race mode: return an end-aligned pointer so each tiny
+		// allocation in the block has a distinct address.
+		x = add(x, elemsize-constsize)
+	}
+	if gcBlackenEnabled != 0 && elemsize != 0 {
+		if assistG := getg().m.curg; assistG != nil {
+			assistG.gcAssistBytes -= int64(elemsize - size)
+		}
+	}
+
+	if debug.malloc {
+		postMallocgcDebug(x, elemsize, typ)
+	}
+	return x
+}
+
+// mallocTiny7 is a generated, size-specialized tiny-allocator fast path:
+// it allocates a 7-byte pointer-free object, packing it into the mcache's
+// current 16-byte tiny block when it fits, otherwise carving a fresh
+// 16-byte element out of the tiny span class. Code generated; do not edit.
+func mallocTiny7(size uintptr, typ *_type, needzero bool) unsafe.Pointer {
+	if doubleCheckMalloc {
+		if gcphase == _GCmarktermination {
+			throw("mallocgc called with gcphase == _GCmarktermination")
+		}
+	}
+
+	lockRankMayQueueFinalizer()
+
+	if debug.malloc {
+		if x := preMallocgcDebug(size, typ); x != nil {
+			return x
+		}
+	}
+
+	// Charge the allocating goroutine for GC assist work while marking.
+	if gcBlackenEnabled != 0 {
+		deductAssistCredit(size)
+	}
+
+	// The requested size, as a compile-time constant for this specialization.
+	const constsize = 7
+
+	// Tiny objects share 16-byte blocks.
+	const elemsize = 16
+
+	mp := acquirem()
+	if doubleCheckMalloc {
+		doubleCheckTiny(constsize, typ, mp)
+	}
+	mp.mallocing = 1
+
+	c := getMCache(mp)
+	off := c.tinyoffset
+
+	// Align within the current tiny block to the natural alignment implied
+	// by the constant size (no alignment needed for odd constsize == 7).
+	if constsize&7 == 0 {
+		off = alignUp(off, 8)
+	} else if goarch.PtrSize == 4 && constsize == 12 {
+
+		off = alignUp(off, 8)
+	} else if constsize&3 == 0 {
+		off = alignUp(off, 4)
+	} else if constsize&1 == 0 {
+		off = alignUp(off, 2)
+	}
+	if off+constsize <= maxTinySize && c.tiny != 0 {
+		// Fast path: the object fits in the current tiny block.
+		x := unsafe.Pointer(c.tiny + off)
+		c.tinyoffset = off + constsize
+		c.tinyAllocs++
+		mp.mallocing = 0
+		releasem(mp)
+		// No new heap memory was obtained; account this allocation as
+		// zero additional bytes.
+		const elemsize = 0
+		{
+
+			if gcBlackenEnabled != 0 && elemsize != 0 {
+				if assistG := getg().m.curg; assistG != nil {
+					assistG.gcAssistBytes -= int64(elemsize - size)
+				}
+			}
+
+			if debug.malloc {
+				postMallocgcDebug(x, elemsize, typ)
+			}
+			return x
+		}
+
+	}
+
+	// Slow path: allocate a fresh 16-byte block from the tiny span class.
+	checkGCTrigger := false
+	span := c.alloc[tinySpanClass]
+
+	const nbytes = 8192
+	const nelems = uint16((nbytes - unsafe.Sizeof(spanInlineMarkBits{})) /
+		16,
+	)
+	// Inlined nextFreeFast: pop the next free slot out of the span's
+	// 64-bit allocCache bitmap, if one is available.
+	var nextFreeFastResult gclinkptr
+	if span.allocCache != 0 {
+		theBit := sys.TrailingZeros64(span.allocCache)
+		result := span.freeindex + uint16(theBit)
+		if result < nelems {
+			freeidx := result + 1
+			if !(freeidx%64 == 0 && freeidx != nelems) {
+				span.allocCache >>= uint(theBit + 1)
+				span.freeindex = freeidx
+				span.allocCount++
+				nextFreeFastResult = gclinkptr(uintptr(result)*
+					16 +
+					span.base())
+			}
+		}
+	}
+	v := nextFreeFastResult
+	if v == 0 {
+		// allocCache exhausted: refill the cached span on the slow path.
+		v, span, checkGCTrigger = c.nextFree(tinySpanClass)
+	}
+	x := unsafe.Pointer(v)
+	// Zero the whole 16-byte block.
+	(*[2]uint64)(x)[0] = 0
+	(*[2]uint64)(x)[1] = 0
+
+	if !raceenabled && (constsize < c.tinyoffset || c.tiny == 0) {
+		// Adopt the new block as the tiny block: it has more free space
+		// remaining than the old one (or there was no tiny block). Tiny
+		// block sharing is disabled under the race detector.
+		c.tiny = uintptr(x)
+		c.tinyoffset = constsize
+	}
+
+	// Make the zeroed contents visible before the pointer is published.
+	publicationBarrier()
+
+	if writeBarrier.enabled {
+		// Allocate-black: mark the new object during the GC mark phase.
+		gcmarknewobject(span, uintptr(x))
+	} else {
+
+		span.freeIndexForScan = span.freeindex
+	}
+
+	// Heap-profiling sample accounting.
+	c.nextSample -= int64(elemsize)
+	if c.nextSample < 0 || MemProfileRate != c.memProfRate {
+		profilealloc(mp, x, elemsize)
+	}
+	mp.mallocing = 0
+	releasem(mp)
+
+	if checkGCTrigger {
+		if t := (gcTrigger{kind: gcTriggerHeap}); t.test() {
+			gcStart(t)
+		}
+	}
+
+	if raceenabled {
+		// Race mode: return an end-aligned pointer so each tiny
+		// allocation in the block has a distinct address.
+		x = add(x, elemsize-constsize)
+	}
+	if gcBlackenEnabled != 0 && elemsize != 0 {
+		if assistG := getg().m.curg; assistG != nil {
+			assistG.gcAssistBytes -= int64(elemsize - size)
+		}
+	}
+
+	if debug.malloc {
+		postMallocgcDebug(x, elemsize, typ)
+	}
+	return x
+}
+
+// mallocTiny8 is a generated, size-specialized tiny-allocator fast path:
+// it allocates an 8-byte pointer-free object, packing it into the mcache's
+// current 16-byte tiny block when it fits, otherwise carving a fresh
+// 16-byte element out of the tiny span class. Code generated; do not edit.
+func mallocTiny8(size uintptr, typ *_type, needzero bool) unsafe.Pointer {
+	if doubleCheckMalloc {
+		if gcphase == _GCmarktermination {
+			throw("mallocgc called with gcphase == _GCmarktermination")
+		}
+	}
+
+	lockRankMayQueueFinalizer()
+
+	if debug.malloc {
+		if x := preMallocgcDebug(size, typ); x != nil {
+			return x
+		}
+	}
+
+	// Charge the allocating goroutine for GC assist work while marking.
+	if gcBlackenEnabled != 0 {
+		deductAssistCredit(size)
+	}
+
+	// The requested size, as a compile-time constant for this specialization.
+	const constsize = 8
+
+	// Tiny objects share 16-byte blocks.
+	const elemsize = 16
+
+	mp := acquirem()
+	if doubleCheckMalloc {
+		doubleCheckTiny(constsize, typ, mp)
+	}
+	mp.mallocing = 1
+
+	c := getMCache(mp)
+	off := c.tinyoffset
+
+	// Align within the current tiny block to the natural alignment implied
+	// by the constant size (8-byte alignment for constsize == 8).
+	if constsize&7 == 0 {
+		off = alignUp(off, 8)
+	} else if goarch.PtrSize == 4 && constsize == 12 {
+
+		off = alignUp(off, 8)
+	} else if constsize&3 == 0 {
+		off = alignUp(off, 4)
+	} else if constsize&1 == 0 {
+		off = alignUp(off, 2)
+	}
+	if off+constsize <= maxTinySize && c.tiny != 0 {
+		// Fast path: the object fits in the current tiny block.
+		x := unsafe.Pointer(c.tiny + off)
+		c.tinyoffset = off + constsize
+		c.tinyAllocs++
+		mp.mallocing = 0
+		releasem(mp)
+		// No new heap memory was obtained; account this allocation as
+		// zero additional bytes.
+		const elemsize = 0
+		{
+
+			if gcBlackenEnabled != 0 && elemsize != 0 {
+				if assistG := getg().m.curg; assistG != nil {
+					assistG.gcAssistBytes -= int64(elemsize - size)
+				}
+			}
+
+			if debug.malloc {
+				postMallocgcDebug(x, elemsize, typ)
+			}
+			return x
+		}
+
+	}
+
+	// Slow path: allocate a fresh 16-byte block from the tiny span class.
+	checkGCTrigger := false
+	span := c.alloc[tinySpanClass]
+
+	const nbytes = 8192
+	const nelems = uint16((nbytes - unsafe.Sizeof(spanInlineMarkBits{})) /
+		16,
+	)
+	// Inlined nextFreeFast: pop the next free slot out of the span's
+	// 64-bit allocCache bitmap, if one is available.
+	var nextFreeFastResult gclinkptr
+	if span.allocCache != 0 {
+		theBit := sys.TrailingZeros64(span.allocCache)
+		result := span.freeindex + uint16(theBit)
+		if result < nelems {
+			freeidx := result + 1
+			if !(freeidx%64 == 0 && freeidx != nelems) {
+				span.allocCache >>= uint(theBit + 1)
+				span.freeindex = freeidx
+				span.allocCount++
+				nextFreeFastResult = gclinkptr(uintptr(result)*
+					16 +
+					span.base())
+			}
+		}
+	}
+	v := nextFreeFastResult
+	if v == 0 {
+		// allocCache exhausted: refill the cached span on the slow path.
+		v, span, checkGCTrigger = c.nextFree(tinySpanClass)
+	}
+	x := unsafe.Pointer(v)
+	// Zero the whole 16-byte block.
+	(*[2]uint64)(x)[0] = 0
+	(*[2]uint64)(x)[1] = 0
+
+	if !raceenabled && (constsize < c.tinyoffset || c.tiny == 0) {
+		// Adopt the new block as the tiny block: it has more free space
+		// remaining than the old one (or there was no tiny block). Tiny
+		// block sharing is disabled under the race detector.
+		c.tiny = uintptr(x)
+		c.tinyoffset = constsize
+	}
+
+	// Make the zeroed contents visible before the pointer is published.
+	publicationBarrier()
+
+	if writeBarrier.enabled {
+		// Allocate-black: mark the new object during the GC mark phase.
+		gcmarknewobject(span, uintptr(x))
+	} else {
+
+		span.freeIndexForScan = span.freeindex
+	}
+
+	// Heap-profiling sample accounting.
+	c.nextSample -= int64(elemsize)
+	if c.nextSample < 0 || MemProfileRate != c.memProfRate {
+		profilealloc(mp, x, elemsize)
+	}
+	mp.mallocing = 0
+	releasem(mp)
+
+	if checkGCTrigger {
+		if t := (gcTrigger{kind: gcTriggerHeap}); t.test() {
+			gcStart(t)
+		}
+	}
+
+	if raceenabled {
+		// Race mode: return an end-aligned pointer so each tiny
+		// allocation in the block has a distinct address.
+		x = add(x, elemsize-constsize)
+	}
+	if gcBlackenEnabled != 0 && elemsize != 0 {
+		if assistG := getg().m.curg; assistG != nil {
+			assistG.gcAssistBytes -= int64(elemsize - size)
+		}
+	}
+
+	if debug.malloc {
+		postMallocgcDebug(x, elemsize, typ)
+	}
+	return x
+}
+
+// mallocTiny9 is a generated, size-specialized tiny-allocator fast path:
+// it allocates a 9-byte pointer-free object, packing it into the mcache's
+// current 16-byte tiny block when it fits, otherwise carving a fresh
+// 16-byte element out of the tiny span class. Code generated; do not edit.
+func mallocTiny9(size uintptr, typ *_type, needzero bool) unsafe.Pointer {
+	if doubleCheckMalloc {
+		if gcphase == _GCmarktermination {
+			throw("mallocgc called with gcphase == _GCmarktermination")
+		}
+	}
+
+	lockRankMayQueueFinalizer()
+
+	if debug.malloc {
+		if x := preMallocgcDebug(size, typ); x != nil {
+			return x
+		}
+	}
+
+	// Charge the allocating goroutine for GC assist work while marking.
+	if gcBlackenEnabled != 0 {
+		deductAssistCredit(size)
+	}
+
+	// The requested size, as a compile-time constant for this specialization.
+	const constsize = 9
+
+	// Tiny objects share 16-byte blocks.
+	const elemsize = 16
+
+	mp := acquirem()
+	if doubleCheckMalloc {
+		doubleCheckTiny(constsize, typ, mp)
+	}
+	mp.mallocing = 1
+
+	c := getMCache(mp)
+	off := c.tinyoffset
+
+	// Align within the current tiny block to the natural alignment implied
+	// by the constant size (no alignment needed for odd constsize == 9).
+	if constsize&7 == 0 {
+		off = alignUp(off, 8)
+	} else if goarch.PtrSize == 4 && constsize == 12 {
+
+		off = alignUp(off, 8)
+	} else if constsize&3 == 0 {
+		off = alignUp(off, 4)
+	} else if constsize&1 == 0 {
+		off = alignUp(off, 2)
+	}
+	if off+constsize <= maxTinySize && c.tiny != 0 {
+		// Fast path: the object fits in the current tiny block.
+		x := unsafe.Pointer(c.tiny + off)
+		c.tinyoffset = off + constsize
+		c.tinyAllocs++
+		mp.mallocing = 0
+		releasem(mp)
+		// No new heap memory was obtained; account this allocation as
+		// zero additional bytes.
+		const elemsize = 0
+		{
+
+			if gcBlackenEnabled != 0 && elemsize != 0 {
+				if assistG := getg().m.curg; assistG != nil {
+					assistG.gcAssistBytes -= int64(elemsize - size)
+				}
+			}
+
+			if debug.malloc {
+				postMallocgcDebug(x, elemsize, typ)
+			}
+			return x
+		}
+
+	}
+
+	// Slow path: allocate a fresh 16-byte block from the tiny span class.
+	checkGCTrigger := false
+	span := c.alloc[tinySpanClass]
+
+	const nbytes = 8192
+	const nelems = uint16((nbytes - unsafe.Sizeof(spanInlineMarkBits{})) /
+		16,
+	)
+	// Inlined nextFreeFast: pop the next free slot out of the span's
+	// 64-bit allocCache bitmap, if one is available.
+	var nextFreeFastResult gclinkptr
+	if span.allocCache != 0 {
+		theBit := sys.TrailingZeros64(span.allocCache)
+		result := span.freeindex + uint16(theBit)
+		if result < nelems {
+			freeidx := result + 1
+			if !(freeidx%64 == 0 && freeidx != nelems) {
+				span.allocCache >>= uint(theBit + 1)
+				span.freeindex = freeidx
+				span.allocCount++
+				nextFreeFastResult = gclinkptr(uintptr(result)*
+					16 +
+					span.base())
+			}
+		}
+	}
+	v := nextFreeFastResult
+	if v == 0 {
+		// allocCache exhausted: refill the cached span on the slow path.
+		v, span, checkGCTrigger = c.nextFree(tinySpanClass)
+	}
+	x := unsafe.Pointer(v)
+	// Zero the whole 16-byte block.
+	(*[2]uint64)(x)[0] = 0
+	(*[2]uint64)(x)[1] = 0
+
+	if !raceenabled && (constsize < c.tinyoffset || c.tiny == 0) {
+		// Adopt the new block as the tiny block: it has more free space
+		// remaining than the old one (or there was no tiny block). Tiny
+		// block sharing is disabled under the race detector.
+		c.tiny = uintptr(x)
+		c.tinyoffset = constsize
+	}
+
+	// Make the zeroed contents visible before the pointer is published.
+	publicationBarrier()
+
+	if writeBarrier.enabled {
+		// Allocate-black: mark the new object during the GC mark phase.
+		gcmarknewobject(span, uintptr(x))
+	} else {
+
+		span.freeIndexForScan = span.freeindex
+	}
+
+	// Heap-profiling sample accounting.
+	c.nextSample -= int64(elemsize)
+	if c.nextSample < 0 || MemProfileRate != c.memProfRate {
+		profilealloc(mp, x, elemsize)
+	}
+	mp.mallocing = 0
+	releasem(mp)
+
+	if checkGCTrigger {
+		if t := (gcTrigger{kind: gcTriggerHeap}); t.test() {
+			gcStart(t)
+		}
+	}
+
+	if raceenabled {
+		// Race mode: return an end-aligned pointer so each tiny
+		// allocation in the block has a distinct address.
+		x = add(x, elemsize-constsize)
+	}
+	if gcBlackenEnabled != 0 && elemsize != 0 {
+		if assistG := getg().m.curg; assistG != nil {
+			assistG.gcAssistBytes -= int64(elemsize - size)
+		}
+	}
+
+	if debug.malloc {
+		postMallocgcDebug(x, elemsize, typ)
+	}
+	return x
+}
+
+// mallocTiny10 is a generated, size-specialized tiny-allocator fast path:
+// it allocates a 10-byte pointer-free object, packing it into the mcache's
+// current 16-byte tiny block when it fits, otherwise carving a fresh
+// 16-byte element out of the tiny span class. Code generated; do not edit.
+func mallocTiny10(size uintptr, typ *_type, needzero bool) unsafe.Pointer {
+	if doubleCheckMalloc {
+		if gcphase == _GCmarktermination {
+			throw("mallocgc called with gcphase == _GCmarktermination")
+		}
+	}
+
+	lockRankMayQueueFinalizer()
+
+	if debug.malloc {
+		if x := preMallocgcDebug(size, typ); x != nil {
+			return x
+		}
+	}
+
+	// Charge the allocating goroutine for GC assist work while marking.
+	if gcBlackenEnabled != 0 {
+		deductAssistCredit(size)
+	}
+
+	// The requested size, as a compile-time constant for this specialization.
+	const constsize = 10
+
+	// Tiny objects share 16-byte blocks.
+	const elemsize = 16
+
+	mp := acquirem()
+	if doubleCheckMalloc {
+		doubleCheckTiny(constsize, typ, mp)
+	}
+	mp.mallocing = 1
+
+	c := getMCache(mp)
+	off := c.tinyoffset
+
+	// Align within the current tiny block to the natural alignment implied
+	// by the constant size (2-byte alignment for constsize == 10).
+	if constsize&7 == 0 {
+		off = alignUp(off, 8)
+	} else if goarch.PtrSize == 4 && constsize == 12 {
+
+		off = alignUp(off, 8)
+	} else if constsize&3 == 0 {
+		off = alignUp(off, 4)
+	} else if constsize&1 == 0 {
+		off = alignUp(off, 2)
+	}
+	if off+constsize <= maxTinySize && c.tiny != 0 {
+		// Fast path: the object fits in the current tiny block.
+		x := unsafe.Pointer(c.tiny + off)
+		c.tinyoffset = off + constsize
+		c.tinyAllocs++
+		mp.mallocing = 0
+		releasem(mp)
+		// No new heap memory was obtained; account this allocation as
+		// zero additional bytes.
+		const elemsize = 0
+		{
+
+			if gcBlackenEnabled != 0 && elemsize != 0 {
+				if assistG := getg().m.curg; assistG != nil {
+					assistG.gcAssistBytes -= int64(elemsize - size)
+				}
+			}
+
+			if debug.malloc {
+				postMallocgcDebug(x, elemsize, typ)
+			}
+			return x
+		}
+
+	}
+
+	// Slow path: allocate a fresh 16-byte block from the tiny span class.
+	checkGCTrigger := false
+	span := c.alloc[tinySpanClass]
+
+	const nbytes = 8192
+	const nelems = uint16((nbytes - unsafe.Sizeof(spanInlineMarkBits{})) /
+		16,
+	)
+	// Inlined nextFreeFast: pop the next free slot out of the span's
+	// 64-bit allocCache bitmap, if one is available.
+	var nextFreeFastResult gclinkptr
+	if span.allocCache != 0 {
+		theBit := sys.TrailingZeros64(span.allocCache)
+		result := span.freeindex + uint16(theBit)
+		if result < nelems {
+			freeidx := result + 1
+			if !(freeidx%64 == 0 && freeidx != nelems) {
+				span.allocCache >>= uint(theBit + 1)
+				span.freeindex = freeidx
+				span.allocCount++
+				nextFreeFastResult = gclinkptr(uintptr(result)*
+					16 +
+					span.base())
+			}
+		}
+	}
+	v := nextFreeFastResult
+	if v == 0 {
+		// allocCache exhausted: refill the cached span on the slow path.
+		v, span, checkGCTrigger = c.nextFree(tinySpanClass)
+	}
+	x := unsafe.Pointer(v)
+	// Zero the whole 16-byte block.
+	(*[2]uint64)(x)[0] = 0
+	(*[2]uint64)(x)[1] = 0
+
+	if !raceenabled && (constsize < c.tinyoffset || c.tiny == 0) {
+		// Adopt the new block as the tiny block: it has more free space
+		// remaining than the old one (or there was no tiny block). Tiny
+		// block sharing is disabled under the race detector.
+		c.tiny = uintptr(x)
+		c.tinyoffset = constsize
+	}
+
+	// Make the zeroed contents visible before the pointer is published.
+	publicationBarrier()
+
+	if writeBarrier.enabled {
+		// Allocate-black: mark the new object during the GC mark phase.
+		gcmarknewobject(span, uintptr(x))
+	} else {
+
+		span.freeIndexForScan = span.freeindex
+	}
+
+	// Heap-profiling sample accounting.
+	c.nextSample -= int64(elemsize)
+	if c.nextSample < 0 || MemProfileRate != c.memProfRate {
+		profilealloc(mp, x, elemsize)
+	}
+	mp.mallocing = 0
+	releasem(mp)
+
+	if checkGCTrigger {
+		if t := (gcTrigger{kind: gcTriggerHeap}); t.test() {
+			gcStart(t)
+		}
+	}
+
+	if raceenabled {
+		// Race mode: return an end-aligned pointer so each tiny
+		// allocation in the block has a distinct address.
+		x = add(x, elemsize-constsize)
+	}
+	if gcBlackenEnabled != 0 && elemsize != 0 {
+		if assistG := getg().m.curg; assistG != nil {
+			assistG.gcAssistBytes -= int64(elemsize - size)
+		}
+	}
+
+	if debug.malloc {
+		postMallocgcDebug(x, elemsize, typ)
+	}
+	return x
+}
+
+// mallocTiny11 allocates an 11-byte pointer-free ("noscan") object.
+// Generated code: a constant-size specialization of mallocgc's tiny
+// allocator path. It first tries to carve 11 bytes out of the current
+// 16-byte tiny block cached in the mcache; on a miss it takes a fresh
+// 16-byte slot from the tiny span class and adopts it as the new tiny
+// block.
+func mallocTiny11(size uintptr, typ *_type, needzero bool) unsafe.Pointer {
+ if doubleCheckMalloc {
+ if gcphase == _GCmarktermination {
+ throw("mallocgc called with gcphase == _GCmarktermination")
+ }
+ }
+
+ lockRankMayQueueFinalizer()
+
+ if debug.malloc {
+ if x := preMallocgcDebug(size, typ); x != nil {
+ return x
+ }
+ }
+
+ // Charge the allocation to the GC assist budget while the GC is active.
+ if gcBlackenEnabled != 0 {
+ deductAssistCredit(size)
+ }
+
+ // Fixed at generation time: the requested size and the element size of
+ // the size class backing the tiny allocator.
+ const constsize = 11
+
+ const elemsize = 16
+
+ mp := acquirem()
+ if doubleCheckMalloc {
+ doubleCheckTiny(constsize, typ, mp)
+ }
+ mp.mallocing = 1
+
+ c := getMCache(mp)
+ off := c.tinyoffset
+
+ // Align the offset within the tiny block to the natural alignment
+ // implied by the object's size.
+ if constsize&7 == 0 {
+ off = alignUp(off, 8)
+ } else if goarch.PtrSize == 4 && constsize == 12 {
+ // On 32-bit, align 12-byte objects to 8 so a leading 64-bit field
+ // stays 8-byte aligned.
+ off = alignUp(off, 8)
+ } else if constsize&3 == 0 {
+ off = alignUp(off, 4)
+ } else if constsize&1 == 0 {
+ off = alignUp(off, 2)
+ }
+ // Fast path: the object fits into the existing tiny block.
+ if off+constsize <= maxTinySize && c.tiny != 0 {
+
+ x := unsafe.Pointer(c.tiny + off)
+ c.tinyoffset = off + constsize
+ c.tinyAllocs++
+ mp.mallocing = 0
+ releasem(mp)
+ // Shadow elemsize with 0: a tiny-block hit consumes no new heap
+ // memory, so no assist debt or debug size is charged below.
+ const elemsize = 0
+ {
+
+ if gcBlackenEnabled != 0 && elemsize != 0 {
+ if assistG := getg().m.curg; assistG != nil {
+ assistG.gcAssistBytes -= int64(elemsize - size)
+ }
+ }
+
+ if debug.malloc {
+ postMallocgcDebug(x, elemsize, typ)
+ }
+ return x
+ }
+
+ }
+
+ // Slow path: take a fresh 16-byte slot from the tiny span class.
+ checkGCTrigger := false
+ span := c.alloc[tinySpanClass]
+
+ const nbytes = 8192
+ const nelems = uint16((nbytes - unsafe.Sizeof(spanInlineMarkBits{})) /
+ 16,
+ )
+ // Inlined nextFreeFast: scan the span's cached allocation bitmap for
+ // the next free slot.
+ var nextFreeFastResult gclinkptr
+ if span.allocCache != 0 {
+ theBit := sys.TrailingZeros64(span.allocCache)
+ result := span.freeindex + uint16(theBit)
+ if result < nelems {
+ freeidx := result + 1
+ if !(freeidx%64 == 0 && freeidx != nelems) {
+ span.allocCache >>= uint(theBit + 1)
+ span.freeindex = freeidx
+ span.allocCount++
+ nextFreeFastResult = gclinkptr(uintptr(result)*
+ 16 +
+ span.base())
+ }
+ }
+ }
+ v := nextFreeFastResult
+ if v == 0 {
+ // Bitmap miss: refill from the mcache (may grow the heap).
+ v, span, checkGCTrigger = c.nextFree(tinySpanClass)
+ }
+ x := unsafe.Pointer(v)
+ // Zero the full 16-byte block up front.
+ (*[2]uint64)(x)[0] = 0
+ (*[2]uint64)(x)[1] = 0
+
+ // Adopt this block as the new tiny block when it has more free space
+ // left than the old one (constsize < c.tinyoffset) or none exists.
+ if !raceenabled && (constsize < c.tinyoffset || c.tiny == 0) {
+
+ c.tiny = uintptr(x)
+ c.tinyoffset = constsize
+ }
+
+ // Make the zeroed memory visible before the pointer is published.
+ publicationBarrier()
+
+ if writeBarrier.enabled {
+ // Mark the new object black; the concurrent GC must not sweep it.
+ gcmarknewobject(span, uintptr(x))
+ } else {
+ // Keep freeIndexForScan in sync so scanning does not treat this
+ // slot as free.
+ span.freeIndexForScan = span.freeindex
+ }
+
+ // Heap-profiling sample countdown.
+ c.nextSample -= int64(elemsize)
+ if c.nextSample < 0 || MemProfileRate != c.memProfRate {
+ profilealloc(mp, x, elemsize)
+ }
+ mp.mallocing = 0
+ releasem(mp)
+
+ // nextFree may have grown the heap; check for a GC trigger.
+ if checkGCTrigger {
+ if t := (gcTrigger{kind: gcTriggerHeap}); t.test() {
+ gcStart(t)
+ }
+ }
+
+ if raceenabled {
+ // NOTE(review): shifts the pointer so the object ends at the block
+ // boundary; presumably aids the race detector — confirm vs mallocgc.
+ x = add(x, elemsize-constsize)
+ }
+ if gcBlackenEnabled != 0 && elemsize != 0 {
+ if assistG := getg().m.curg; assistG != nil {
+ assistG.gcAssistBytes -= int64(elemsize - size)
+ }
+ }
+
+ if debug.malloc {
+ postMallocgcDebug(x, elemsize, typ)
+ }
+ return x
+}
+
+// mallocTiny12 allocates a 12-byte pointer-free ("noscan") object.
+// Generated code: a constant-size specialization of mallocgc's tiny
+// allocator path. It first tries to carve 12 bytes out of the current
+// 16-byte tiny block cached in the mcache; on a miss it takes a fresh
+// 16-byte slot from the tiny span class and adopts it as the new tiny
+// block.
+func mallocTiny12(size uintptr, typ *_type, needzero bool) unsafe.Pointer {
+ if doubleCheckMalloc {
+ if gcphase == _GCmarktermination {
+ throw("mallocgc called with gcphase == _GCmarktermination")
+ }
+ }
+
+ lockRankMayQueueFinalizer()
+
+ if debug.malloc {
+ if x := preMallocgcDebug(size, typ); x != nil {
+ return x
+ }
+ }
+
+ // Charge the allocation to the GC assist budget while the GC is active.
+ if gcBlackenEnabled != 0 {
+ deductAssistCredit(size)
+ }
+
+ // Fixed at generation time: the requested size and the element size of
+ // the size class backing the tiny allocator.
+ const constsize = 12
+
+ const elemsize = 16
+
+ mp := acquirem()
+ if doubleCheckMalloc {
+ doubleCheckTiny(constsize, typ, mp)
+ }
+ mp.mallocing = 1
+
+ c := getMCache(mp)
+ off := c.tinyoffset
+
+ // Align the offset within the tiny block to the natural alignment
+ // implied by the object's size.
+ if constsize&7 == 0 {
+ off = alignUp(off, 8)
+ } else if goarch.PtrSize == 4 && constsize == 12 {
+ // On 32-bit, align 12-byte objects to 8 so a leading 64-bit field
+ // stays 8-byte aligned.
+ off = alignUp(off, 8)
+ } else if constsize&3 == 0 {
+ off = alignUp(off, 4)
+ } else if constsize&1 == 0 {
+ off = alignUp(off, 2)
+ }
+ // Fast path: the object fits into the existing tiny block.
+ if off+constsize <= maxTinySize && c.tiny != 0 {
+
+ x := unsafe.Pointer(c.tiny + off)
+ c.tinyoffset = off + constsize
+ c.tinyAllocs++
+ mp.mallocing = 0
+ releasem(mp)
+ // Shadow elemsize with 0: a tiny-block hit consumes no new heap
+ // memory, so no assist debt or debug size is charged below.
+ const elemsize = 0
+ {
+
+ if gcBlackenEnabled != 0 && elemsize != 0 {
+ if assistG := getg().m.curg; assistG != nil {
+ assistG.gcAssistBytes -= int64(elemsize - size)
+ }
+ }
+
+ if debug.malloc {
+ postMallocgcDebug(x, elemsize, typ)
+ }
+ return x
+ }
+
+ }
+
+ // Slow path: take a fresh 16-byte slot from the tiny span class.
+ checkGCTrigger := false
+ span := c.alloc[tinySpanClass]
+
+ const nbytes = 8192
+ const nelems = uint16((nbytes - unsafe.Sizeof(spanInlineMarkBits{})) /
+ 16,
+ )
+ // Inlined nextFreeFast: scan the span's cached allocation bitmap for
+ // the next free slot.
+ var nextFreeFastResult gclinkptr
+ if span.allocCache != 0 {
+ theBit := sys.TrailingZeros64(span.allocCache)
+ result := span.freeindex + uint16(theBit)
+ if result < nelems {
+ freeidx := result + 1
+ if !(freeidx%64 == 0 && freeidx != nelems) {
+ span.allocCache >>= uint(theBit + 1)
+ span.freeindex = freeidx
+ span.allocCount++
+ nextFreeFastResult = gclinkptr(uintptr(result)*
+ 16 +
+ span.base())
+ }
+ }
+ }
+ v := nextFreeFastResult
+ if v == 0 {
+ // Bitmap miss: refill from the mcache (may grow the heap).
+ v, span, checkGCTrigger = c.nextFree(tinySpanClass)
+ }
+ x := unsafe.Pointer(v)
+ // Zero the full 16-byte block up front.
+ (*[2]uint64)(x)[0] = 0
+ (*[2]uint64)(x)[1] = 0
+
+ // Adopt this block as the new tiny block when it has more free space
+ // left than the old one (constsize < c.tinyoffset) or none exists.
+ if !raceenabled && (constsize < c.tinyoffset || c.tiny == 0) {
+
+ c.tiny = uintptr(x)
+ c.tinyoffset = constsize
+ }
+
+ // Make the zeroed memory visible before the pointer is published.
+ publicationBarrier()
+
+ if writeBarrier.enabled {
+ // Mark the new object black; the concurrent GC must not sweep it.
+ gcmarknewobject(span, uintptr(x))
+ } else {
+ // Keep freeIndexForScan in sync so scanning does not treat this
+ // slot as free.
+ span.freeIndexForScan = span.freeindex
+ }
+
+ // Heap-profiling sample countdown.
+ c.nextSample -= int64(elemsize)
+ if c.nextSample < 0 || MemProfileRate != c.memProfRate {
+ profilealloc(mp, x, elemsize)
+ }
+ mp.mallocing = 0
+ releasem(mp)
+
+ // nextFree may have grown the heap; check for a GC trigger.
+ if checkGCTrigger {
+ if t := (gcTrigger{kind: gcTriggerHeap}); t.test() {
+ gcStart(t)
+ }
+ }
+
+ if raceenabled {
+ // NOTE(review): shifts the pointer so the object ends at the block
+ // boundary; presumably aids the race detector — confirm vs mallocgc.
+ x = add(x, elemsize-constsize)
+ }
+ if gcBlackenEnabled != 0 && elemsize != 0 {
+ if assistG := getg().m.curg; assistG != nil {
+ assistG.gcAssistBytes -= int64(elemsize - size)
+ }
+ }
+
+ if debug.malloc {
+ postMallocgcDebug(x, elemsize, typ)
+ }
+ return x
+}
+
+// mallocTiny13 allocates a 13-byte pointer-free ("noscan") object.
+// Generated code: a constant-size specialization of mallocgc's tiny
+// allocator path. It first tries to carve 13 bytes out of the current
+// 16-byte tiny block cached in the mcache; on a miss it takes a fresh
+// 16-byte slot from the tiny span class and adopts it as the new tiny
+// block.
+func mallocTiny13(size uintptr, typ *_type, needzero bool) unsafe.Pointer {
+ if doubleCheckMalloc {
+ if gcphase == _GCmarktermination {
+ throw("mallocgc called with gcphase == _GCmarktermination")
+ }
+ }
+
+ lockRankMayQueueFinalizer()
+
+ if debug.malloc {
+ if x := preMallocgcDebug(size, typ); x != nil {
+ return x
+ }
+ }
+
+ // Charge the allocation to the GC assist budget while the GC is active.
+ if gcBlackenEnabled != 0 {
+ deductAssistCredit(size)
+ }
+
+ // Fixed at generation time: the requested size and the element size of
+ // the size class backing the tiny allocator.
+ const constsize = 13
+
+ const elemsize = 16
+
+ mp := acquirem()
+ if doubleCheckMalloc {
+ doubleCheckTiny(constsize, typ, mp)
+ }
+ mp.mallocing = 1
+
+ c := getMCache(mp)
+ off := c.tinyoffset
+
+ // Align the offset within the tiny block to the natural alignment
+ // implied by the object's size.
+ if constsize&7 == 0 {
+ off = alignUp(off, 8)
+ } else if goarch.PtrSize == 4 && constsize == 12 {
+ // On 32-bit, align 12-byte objects to 8 so a leading 64-bit field
+ // stays 8-byte aligned.
+ off = alignUp(off, 8)
+ } else if constsize&3 == 0 {
+ off = alignUp(off, 4)
+ } else if constsize&1 == 0 {
+ off = alignUp(off, 2)
+ }
+ // Fast path: the object fits into the existing tiny block.
+ if off+constsize <= maxTinySize && c.tiny != 0 {
+
+ x := unsafe.Pointer(c.tiny + off)
+ c.tinyoffset = off + constsize
+ c.tinyAllocs++
+ mp.mallocing = 0
+ releasem(mp)
+ // Shadow elemsize with 0: a tiny-block hit consumes no new heap
+ // memory, so no assist debt or debug size is charged below.
+ const elemsize = 0
+ {
+
+ if gcBlackenEnabled != 0 && elemsize != 0 {
+ if assistG := getg().m.curg; assistG != nil {
+ assistG.gcAssistBytes -= int64(elemsize - size)
+ }
+ }
+
+ if debug.malloc {
+ postMallocgcDebug(x, elemsize, typ)
+ }
+ return x
+ }
+
+ }
+
+ // Slow path: take a fresh 16-byte slot from the tiny span class.
+ checkGCTrigger := false
+ span := c.alloc[tinySpanClass]
+
+ const nbytes = 8192
+ const nelems = uint16((nbytes - unsafe.Sizeof(spanInlineMarkBits{})) /
+ 16,
+ )
+ // Inlined nextFreeFast: scan the span's cached allocation bitmap for
+ // the next free slot.
+ var nextFreeFastResult gclinkptr
+ if span.allocCache != 0 {
+ theBit := sys.TrailingZeros64(span.allocCache)
+ result := span.freeindex + uint16(theBit)
+ if result < nelems {
+ freeidx := result + 1
+ if !(freeidx%64 == 0 && freeidx != nelems) {
+ span.allocCache >>= uint(theBit + 1)
+ span.freeindex = freeidx
+ span.allocCount++
+ nextFreeFastResult = gclinkptr(uintptr(result)*
+ 16 +
+ span.base())
+ }
+ }
+ }
+ v := nextFreeFastResult
+ if v == 0 {
+ // Bitmap miss: refill from the mcache (may grow the heap).
+ v, span, checkGCTrigger = c.nextFree(tinySpanClass)
+ }
+ x := unsafe.Pointer(v)
+ // Zero the full 16-byte block up front.
+ (*[2]uint64)(x)[0] = 0
+ (*[2]uint64)(x)[1] = 0
+
+ // Adopt this block as the new tiny block when it has more free space
+ // left than the old one (constsize < c.tinyoffset) or none exists.
+ if !raceenabled && (constsize < c.tinyoffset || c.tiny == 0) {
+
+ c.tiny = uintptr(x)
+ c.tinyoffset = constsize
+ }
+
+ // Make the zeroed memory visible before the pointer is published.
+ publicationBarrier()
+
+ if writeBarrier.enabled {
+ // Mark the new object black; the concurrent GC must not sweep it.
+ gcmarknewobject(span, uintptr(x))
+ } else {
+ // Keep freeIndexForScan in sync so scanning does not treat this
+ // slot as free.
+ span.freeIndexForScan = span.freeindex
+ }
+
+ // Heap-profiling sample countdown.
+ c.nextSample -= int64(elemsize)
+ if c.nextSample < 0 || MemProfileRate != c.memProfRate {
+ profilealloc(mp, x, elemsize)
+ }
+ mp.mallocing = 0
+ releasem(mp)
+
+ // nextFree may have grown the heap; check for a GC trigger.
+ if checkGCTrigger {
+ if t := (gcTrigger{kind: gcTriggerHeap}); t.test() {
+ gcStart(t)
+ }
+ }
+
+ if raceenabled {
+ // NOTE(review): shifts the pointer so the object ends at the block
+ // boundary; presumably aids the race detector — confirm vs mallocgc.
+ x = add(x, elemsize-constsize)
+ }
+ if gcBlackenEnabled != 0 && elemsize != 0 {
+ if assistG := getg().m.curg; assistG != nil {
+ assistG.gcAssistBytes -= int64(elemsize - size)
+ }
+ }
+
+ if debug.malloc {
+ postMallocgcDebug(x, elemsize, typ)
+ }
+ return x
+}
+
+// mallocTiny14 allocates a 14-byte pointer-free ("noscan") object.
+// Generated code: a constant-size specialization of mallocgc's tiny
+// allocator path. It first tries to carve 14 bytes out of the current
+// 16-byte tiny block cached in the mcache; on a miss it takes a fresh
+// 16-byte slot from the tiny span class and adopts it as the new tiny
+// block.
+func mallocTiny14(size uintptr, typ *_type, needzero bool) unsafe.Pointer {
+ if doubleCheckMalloc {
+ if gcphase == _GCmarktermination {
+ throw("mallocgc called with gcphase == _GCmarktermination")
+ }
+ }
+
+ lockRankMayQueueFinalizer()
+
+ if debug.malloc {
+ if x := preMallocgcDebug(size, typ); x != nil {
+ return x
+ }
+ }
+
+ // Charge the allocation to the GC assist budget while the GC is active.
+ if gcBlackenEnabled != 0 {
+ deductAssistCredit(size)
+ }
+
+ // Fixed at generation time: the requested size and the element size of
+ // the size class backing the tiny allocator.
+ const constsize = 14
+
+ const elemsize = 16
+
+ mp := acquirem()
+ if doubleCheckMalloc {
+ doubleCheckTiny(constsize, typ, mp)
+ }
+ mp.mallocing = 1
+
+ c := getMCache(mp)
+ off := c.tinyoffset
+
+ // Align the offset within the tiny block to the natural alignment
+ // implied by the object's size.
+ if constsize&7 == 0 {
+ off = alignUp(off, 8)
+ } else if goarch.PtrSize == 4 && constsize == 12 {
+ // On 32-bit, align 12-byte objects to 8 so a leading 64-bit field
+ // stays 8-byte aligned.
+ off = alignUp(off, 8)
+ } else if constsize&3 == 0 {
+ off = alignUp(off, 4)
+ } else if constsize&1 == 0 {
+ off = alignUp(off, 2)
+ }
+ // Fast path: the object fits into the existing tiny block.
+ if off+constsize <= maxTinySize && c.tiny != 0 {
+
+ x := unsafe.Pointer(c.tiny + off)
+ c.tinyoffset = off + constsize
+ c.tinyAllocs++
+ mp.mallocing = 0
+ releasem(mp)
+ // Shadow elemsize with 0: a tiny-block hit consumes no new heap
+ // memory, so no assist debt or debug size is charged below.
+ const elemsize = 0
+ {
+
+ if gcBlackenEnabled != 0 && elemsize != 0 {
+ if assistG := getg().m.curg; assistG != nil {
+ assistG.gcAssistBytes -= int64(elemsize - size)
+ }
+ }
+
+ if debug.malloc {
+ postMallocgcDebug(x, elemsize, typ)
+ }
+ return x
+ }
+
+ }
+
+ // Slow path: take a fresh 16-byte slot from the tiny span class.
+ checkGCTrigger := false
+ span := c.alloc[tinySpanClass]
+
+ const nbytes = 8192
+ const nelems = uint16((nbytes - unsafe.Sizeof(spanInlineMarkBits{})) /
+ 16,
+ )
+ // Inlined nextFreeFast: scan the span's cached allocation bitmap for
+ // the next free slot.
+ var nextFreeFastResult gclinkptr
+ if span.allocCache != 0 {
+ theBit := sys.TrailingZeros64(span.allocCache)
+ result := span.freeindex + uint16(theBit)
+ if result < nelems {
+ freeidx := result + 1
+ if !(freeidx%64 == 0 && freeidx != nelems) {
+ span.allocCache >>= uint(theBit + 1)
+ span.freeindex = freeidx
+ span.allocCount++
+ nextFreeFastResult = gclinkptr(uintptr(result)*
+ 16 +
+ span.base())
+ }
+ }
+ }
+ v := nextFreeFastResult
+ if v == 0 {
+ // Bitmap miss: refill from the mcache (may grow the heap).
+ v, span, checkGCTrigger = c.nextFree(tinySpanClass)
+ }
+ x := unsafe.Pointer(v)
+ // Zero the full 16-byte block up front.
+ (*[2]uint64)(x)[0] = 0
+ (*[2]uint64)(x)[1] = 0
+
+ // Adopt this block as the new tiny block when it has more free space
+ // left than the old one (constsize < c.tinyoffset) or none exists.
+ if !raceenabled && (constsize < c.tinyoffset || c.tiny == 0) {
+
+ c.tiny = uintptr(x)
+ c.tinyoffset = constsize
+ }
+
+ // Make the zeroed memory visible before the pointer is published.
+ publicationBarrier()
+
+ if writeBarrier.enabled {
+ // Mark the new object black; the concurrent GC must not sweep it.
+ gcmarknewobject(span, uintptr(x))
+ } else {
+ // Keep freeIndexForScan in sync so scanning does not treat this
+ // slot as free.
+ span.freeIndexForScan = span.freeindex
+ }
+
+ // Heap-profiling sample countdown.
+ c.nextSample -= int64(elemsize)
+ if c.nextSample < 0 || MemProfileRate != c.memProfRate {
+ profilealloc(mp, x, elemsize)
+ }
+ mp.mallocing = 0
+ releasem(mp)
+
+ // nextFree may have grown the heap; check for a GC trigger.
+ if checkGCTrigger {
+ if t := (gcTrigger{kind: gcTriggerHeap}); t.test() {
+ gcStart(t)
+ }
+ }
+
+ if raceenabled {
+ // NOTE(review): shifts the pointer so the object ends at the block
+ // boundary; presumably aids the race detector — confirm vs mallocgc.
+ x = add(x, elemsize-constsize)
+ }
+ if gcBlackenEnabled != 0 && elemsize != 0 {
+ if assistG := getg().m.curg; assistG != nil {
+ assistG.gcAssistBytes -= int64(elemsize - size)
+ }
+ }
+
+ if debug.malloc {
+ postMallocgcDebug(x, elemsize, typ)
+ }
+ return x
+}
+
+// mallocTiny15 allocates a 15-byte pointer-free ("noscan") object.
+// Generated code: a constant-size specialization of mallocgc's tiny
+// allocator path. It first tries to carve 15 bytes out of the current
+// 16-byte tiny block cached in the mcache; on a miss it takes a fresh
+// 16-byte slot from the tiny span class and adopts it as the new tiny
+// block.
+func mallocTiny15(size uintptr, typ *_type, needzero bool) unsafe.Pointer {
+ if doubleCheckMalloc {
+ if gcphase == _GCmarktermination {
+ throw("mallocgc called with gcphase == _GCmarktermination")
+ }
+ }
+
+ lockRankMayQueueFinalizer()
+
+ if debug.malloc {
+ if x := preMallocgcDebug(size, typ); x != nil {
+ return x
+ }
+ }
+
+ // Charge the allocation to the GC assist budget while the GC is active.
+ if gcBlackenEnabled != 0 {
+ deductAssistCredit(size)
+ }
+
+ // Fixed at generation time: the requested size and the element size of
+ // the size class backing the tiny allocator.
+ const constsize = 15
+
+ const elemsize = 16
+
+ mp := acquirem()
+ if doubleCheckMalloc {
+ doubleCheckTiny(constsize, typ, mp)
+ }
+ mp.mallocing = 1
+
+ c := getMCache(mp)
+ off := c.tinyoffset
+
+ // Align the offset within the tiny block to the natural alignment
+ // implied by the object's size.
+ if constsize&7 == 0 {
+ off = alignUp(off, 8)
+ } else if goarch.PtrSize == 4 && constsize == 12 {
+ // On 32-bit, align 12-byte objects to 8 so a leading 64-bit field
+ // stays 8-byte aligned.
+ off = alignUp(off, 8)
+ } else if constsize&3 == 0 {
+ off = alignUp(off, 4)
+ } else if constsize&1 == 0 {
+ off = alignUp(off, 2)
+ }
+ // Fast path: the object fits into the existing tiny block.
+ if off+constsize <= maxTinySize && c.tiny != 0 {
+
+ x := unsafe.Pointer(c.tiny + off)
+ c.tinyoffset = off + constsize
+ c.tinyAllocs++
+ mp.mallocing = 0
+ releasem(mp)
+ // Shadow elemsize with 0: a tiny-block hit consumes no new heap
+ // memory, so no assist debt or debug size is charged below.
+ const elemsize = 0
+ {
+
+ if gcBlackenEnabled != 0 && elemsize != 0 {
+ if assistG := getg().m.curg; assistG != nil {
+ assistG.gcAssistBytes -= int64(elemsize - size)
+ }
+ }
+
+ if debug.malloc {
+ postMallocgcDebug(x, elemsize, typ)
+ }
+ return x
+ }
+
+ }
+
+ // Slow path: take a fresh 16-byte slot from the tiny span class.
+ checkGCTrigger := false
+ span := c.alloc[tinySpanClass]
+
+ const nbytes = 8192
+ const nelems = uint16((nbytes - unsafe.Sizeof(spanInlineMarkBits{})) /
+ 16,
+ )
+ // Inlined nextFreeFast: scan the span's cached allocation bitmap for
+ // the next free slot.
+ var nextFreeFastResult gclinkptr
+ if span.allocCache != 0 {
+ theBit := sys.TrailingZeros64(span.allocCache)
+ result := span.freeindex + uint16(theBit)
+ if result < nelems {
+ freeidx := result + 1
+ if !(freeidx%64 == 0 && freeidx != nelems) {
+ span.allocCache >>= uint(theBit + 1)
+ span.freeindex = freeidx
+ span.allocCount++
+ nextFreeFastResult = gclinkptr(uintptr(result)*
+ 16 +
+ span.base())
+ }
+ }
+ }
+ v := nextFreeFastResult
+ if v == 0 {
+ // Bitmap miss: refill from the mcache (may grow the heap).
+ v, span, checkGCTrigger = c.nextFree(tinySpanClass)
+ }
+ x := unsafe.Pointer(v)
+ // Zero the full 16-byte block up front.
+ (*[2]uint64)(x)[0] = 0
+ (*[2]uint64)(x)[1] = 0
+
+ // Adopt this block as the new tiny block when it has more free space
+ // left than the old one (constsize < c.tinyoffset) or none exists.
+ if !raceenabled && (constsize < c.tinyoffset || c.tiny == 0) {
+
+ c.tiny = uintptr(x)
+ c.tinyoffset = constsize
+ }
+
+ // Make the zeroed memory visible before the pointer is published.
+ publicationBarrier()
+
+ if writeBarrier.enabled {
+ // Mark the new object black; the concurrent GC must not sweep it.
+ gcmarknewobject(span, uintptr(x))
+ } else {
+ // Keep freeIndexForScan in sync so scanning does not treat this
+ // slot as free.
+ span.freeIndexForScan = span.freeindex
+ }
+
+ // Heap-profiling sample countdown.
+ c.nextSample -= int64(elemsize)
+ if c.nextSample < 0 || MemProfileRate != c.memProfRate {
+ profilealloc(mp, x, elemsize)
+ }
+ mp.mallocing = 0
+ releasem(mp)
+
+ // nextFree may have grown the heap; check for a GC trigger.
+ if checkGCTrigger {
+ if t := (gcTrigger{kind: gcTriggerHeap}); t.test() {
+ gcStart(t)
+ }
+ }
+
+ if raceenabled {
+ // NOTE(review): shifts the pointer so the object ends at the block
+ // boundary; presumably aids the race detector — confirm vs mallocgc.
+ x = add(x, elemsize-constsize)
+ }
+ if gcBlackenEnabled != 0 && elemsize != 0 {
+ if assistG := getg().m.curg; assistG != nil {
+ assistG.gcAssistBytes -= int64(elemsize - size)
+ }
+ }
+
+ if debug.malloc {
+ postMallocgcDebug(x, elemsize, typ)
+ }
+ return x
+}
+
+// mallocgcSmallNoScanSC2 allocates a pointer-free ("noscan") object
+// from size class 2 (16-byte elements). Generated code: a
+// constant-size-class specialization of mallocgc's small-object
+// noscan path.
+func mallocgcSmallNoScanSC2(size uintptr, typ *_type, needzero bool) unsafe.Pointer {
+ if doubleCheckMalloc {
+ if gcphase == _GCmarktermination {
+ throw("mallocgc called with gcphase == _GCmarktermination")
+ }
+ }
+
+ lockRankMayQueueFinalizer()
+
+ if debug.malloc {
+ if x := preMallocgcDebug(size, typ); x != nil {
+ return x
+ }
+ }
+
+ // Charge the allocation to the GC assist budget while the GC is active.
+ if gcBlackenEnabled != 0 {
+ deductAssistCredit(size)
+ }
+
+ // Fixed at generation time: the size class and its element size.
+ const sizeclass = 2
+
+ const elemsize = 16
+
+ mp := acquirem()
+ if doubleCheckMalloc {
+ doubleCheckSmallNoScan(typ, mp)
+ }
+ mp.mallocing = 1
+
+ checkGCTrigger := false
+ c := getMCache(mp)
+ // Span class for this size class with the noscan bit set.
+ const spc = spanClass(sizeclass<<1) | spanClass(1)
+ span := c.alloc[spc]
+
+ // Inlined nextFreeFast: scan the span's cached allocation bitmap for
+ // the next free slot.
+ var nextFreeFastResult gclinkptr
+ if span.allocCache != 0 {
+ theBit := sys.TrailingZeros64(span.allocCache)
+ result := span.freeindex + uint16(theBit)
+ if result < span.nelems {
+ freeidx := result + 1
+ if !(freeidx%64 == 0 && freeidx != span.nelems) {
+ span.allocCache >>= uint(theBit + 1)
+ span.freeindex = freeidx
+ span.allocCount++
+ nextFreeFastResult = gclinkptr(uintptr(result)*
+ 16 +
+ span.base())
+ }
+ }
+ }
+ v := nextFreeFastResult
+ if v == 0 {
+ // Bitmap miss: refill from the mcache (may grow the heap).
+ v, span, checkGCTrigger = c.nextFree(spc)
+ }
+ x := unsafe.Pointer(v)
+ // Zero only when the caller requires it and the span may hold stale data.
+ if needzero && span.needzero != 0 {
+ memclrNoHeapPointers(x, elemsize)
+ }
+
+ // Make any zeroing visible before the pointer is published.
+ publicationBarrier()
+
+ if writeBarrier.enabled {
+ // Mark the new object black; the concurrent GC must not sweep it.
+ gcmarknewobject(span, uintptr(x))
+ } else {
+ // Keep freeIndexForScan in sync so scanning does not treat this
+ // slot as free.
+ span.freeIndexForScan = span.freeindex
+ }
+
+ // Heap-profiling sample countdown.
+ c.nextSample -= int64(elemsize)
+ if c.nextSample < 0 || MemProfileRate != c.memProfRate {
+ profilealloc(mp, x, elemsize)
+ }
+ mp.mallocing = 0
+ releasem(mp)
+
+ // nextFree may have grown the heap; check for a GC trigger.
+ if checkGCTrigger {
+ if t := (gcTrigger{kind: gcTriggerHeap}); t.test() {
+ gcStart(t)
+ }
+ }
+ if gcBlackenEnabled != 0 && elemsize != 0 {
+ if assistG := getg().m.curg; assistG != nil {
+ assistG.gcAssistBytes -= int64(elemsize - size)
+ }
+ }
+
+ if debug.malloc {
+ postMallocgcDebug(x, elemsize, typ)
+ }
+ return x
+}
+
+// mallocgcSmallNoScanSC3 allocates a pointer-free ("noscan") object
+// from size class 3 (24-byte elements). Generated code: a
+// constant-size-class specialization of mallocgc's small-object
+// noscan path.
+func mallocgcSmallNoScanSC3(size uintptr, typ *_type, needzero bool) unsafe.Pointer {
+ if doubleCheckMalloc {
+ if gcphase == _GCmarktermination {
+ throw("mallocgc called with gcphase == _GCmarktermination")
+ }
+ }
+
+ lockRankMayQueueFinalizer()
+
+ if debug.malloc {
+ if x := preMallocgcDebug(size, typ); x != nil {
+ return x
+ }
+ }
+
+ // Charge the allocation to the GC assist budget while the GC is active.
+ if gcBlackenEnabled != 0 {
+ deductAssistCredit(size)
+ }
+
+ // Fixed at generation time: the size class and its element size.
+ const sizeclass = 3
+
+ const elemsize = 24
+
+ mp := acquirem()
+ if doubleCheckMalloc {
+ doubleCheckSmallNoScan(typ, mp)
+ }
+ mp.mallocing = 1
+
+ checkGCTrigger := false
+ c := getMCache(mp)
+ // Span class for this size class with the noscan bit set.
+ const spc = spanClass(sizeclass<<1) | spanClass(1)
+ span := c.alloc[spc]
+
+ // Inlined nextFreeFast: scan the span's cached allocation bitmap for
+ // the next free slot.
+ var nextFreeFastResult gclinkptr
+ if span.allocCache != 0 {
+ theBit := sys.TrailingZeros64(span.allocCache)
+ result := span.freeindex + uint16(theBit)
+ if result < span.nelems {
+ freeidx := result + 1
+ if !(freeidx%64 == 0 && freeidx != span.nelems) {
+ span.allocCache >>= uint(theBit + 1)
+ span.freeindex = freeidx
+ span.allocCount++
+ nextFreeFastResult = gclinkptr(uintptr(result)*
+ 24 +
+ span.base())
+ }
+ }
+ }
+ v := nextFreeFastResult
+ if v == 0 {
+ // Bitmap miss: refill from the mcache (may grow the heap).
+ v, span, checkGCTrigger = c.nextFree(spc)
+ }
+ x := unsafe.Pointer(v)
+ // Zero only when the caller requires it and the span may hold stale data.
+ if needzero && span.needzero != 0 {
+ memclrNoHeapPointers(x, elemsize)
+ }
+
+ // Make any zeroing visible before the pointer is published.
+ publicationBarrier()
+
+ if writeBarrier.enabled {
+ // Mark the new object black; the concurrent GC must not sweep it.
+ gcmarknewobject(span, uintptr(x))
+ } else {
+ // Keep freeIndexForScan in sync so scanning does not treat this
+ // slot as free.
+ span.freeIndexForScan = span.freeindex
+ }
+
+ // Heap-profiling sample countdown.
+ c.nextSample -= int64(elemsize)
+ if c.nextSample < 0 || MemProfileRate != c.memProfRate {
+ profilealloc(mp, x, elemsize)
+ }
+ mp.mallocing = 0
+ releasem(mp)
+
+ // nextFree may have grown the heap; check for a GC trigger.
+ if checkGCTrigger {
+ if t := (gcTrigger{kind: gcTriggerHeap}); t.test() {
+ gcStart(t)
+ }
+ }
+ if gcBlackenEnabled != 0 && elemsize != 0 {
+ if assistG := getg().m.curg; assistG != nil {
+ assistG.gcAssistBytes -= int64(elemsize - size)
+ }
+ }
+
+ if debug.malloc {
+ postMallocgcDebug(x, elemsize, typ)
+ }
+ return x
+}
+
+// mallocgcSmallNoScanSC4 allocates a pointer-free ("noscan") object
+// from size class 4 (32-byte elements). Generated code: a
+// constant-size-class specialization of mallocgc's small-object
+// noscan path.
+func mallocgcSmallNoScanSC4(size uintptr, typ *_type, needzero bool) unsafe.Pointer {
+ if doubleCheckMalloc {
+ if gcphase == _GCmarktermination {
+ throw("mallocgc called with gcphase == _GCmarktermination")
+ }
+ }
+
+ lockRankMayQueueFinalizer()
+
+ if debug.malloc {
+ if x := preMallocgcDebug(size, typ); x != nil {
+ return x
+ }
+ }
+
+ // Charge the allocation to the GC assist budget while the GC is active.
+ if gcBlackenEnabled != 0 {
+ deductAssistCredit(size)
+ }
+
+ // Fixed at generation time: the size class and its element size.
+ const sizeclass = 4
+
+ const elemsize = 32
+
+ mp := acquirem()
+ if doubleCheckMalloc {
+ doubleCheckSmallNoScan(typ, mp)
+ }
+ mp.mallocing = 1
+
+ checkGCTrigger := false
+ c := getMCache(mp)
+ // Span class for this size class with the noscan bit set.
+ const spc = spanClass(sizeclass<<1) | spanClass(1)
+ span := c.alloc[spc]
+
+ // Inlined nextFreeFast: scan the span's cached allocation bitmap for
+ // the next free slot.
+ var nextFreeFastResult gclinkptr
+ if span.allocCache != 0 {
+ theBit := sys.TrailingZeros64(span.allocCache)
+ result := span.freeindex + uint16(theBit)
+ if result < span.nelems {
+ freeidx := result + 1
+ if !(freeidx%64 == 0 && freeidx != span.nelems) {
+ span.allocCache >>= uint(theBit + 1)
+ span.freeindex = freeidx
+ span.allocCount++
+ nextFreeFastResult = gclinkptr(uintptr(result)*
+ 32 +
+ span.base())
+ }
+ }
+ }
+ v := nextFreeFastResult
+ if v == 0 {
+ // Bitmap miss: refill from the mcache (may grow the heap).
+ v, span, checkGCTrigger = c.nextFree(spc)
+ }
+ x := unsafe.Pointer(v)
+ // Zero only when the caller requires it and the span may hold stale data.
+ if needzero && span.needzero != 0 {
+ memclrNoHeapPointers(x, elemsize)
+ }
+
+ // Make any zeroing visible before the pointer is published.
+ publicationBarrier()
+
+ if writeBarrier.enabled {
+ // Mark the new object black; the concurrent GC must not sweep it.
+ gcmarknewobject(span, uintptr(x))
+ } else {
+ // Keep freeIndexForScan in sync so scanning does not treat this
+ // slot as free.
+ span.freeIndexForScan = span.freeindex
+ }
+
+ // Heap-profiling sample countdown.
+ c.nextSample -= int64(elemsize)
+ if c.nextSample < 0 || MemProfileRate != c.memProfRate {
+ profilealloc(mp, x, elemsize)
+ }
+ mp.mallocing = 0
+ releasem(mp)
+
+ // nextFree may have grown the heap; check for a GC trigger.
+ if checkGCTrigger {
+ if t := (gcTrigger{kind: gcTriggerHeap}); t.test() {
+ gcStart(t)
+ }
+ }
+ if gcBlackenEnabled != 0 && elemsize != 0 {
+ if assistG := getg().m.curg; assistG != nil {
+ assistG.gcAssistBytes -= int64(elemsize - size)
+ }
+ }
+
+ if debug.malloc {
+ postMallocgcDebug(x, elemsize, typ)
+ }
+ return x
+}
+
+// mallocgcSmallNoScanSC5 allocates a pointer-free ("noscan") object
+// from size class 5 (48-byte elements). Generated code: a
+// constant-size-class specialization of mallocgc's small-object
+// noscan path.
+func mallocgcSmallNoScanSC5(size uintptr, typ *_type, needzero bool) unsafe.Pointer {
+ if doubleCheckMalloc {
+ if gcphase == _GCmarktermination {
+ throw("mallocgc called with gcphase == _GCmarktermination")
+ }
+ }
+
+ lockRankMayQueueFinalizer()
+
+ if debug.malloc {
+ if x := preMallocgcDebug(size, typ); x != nil {
+ return x
+ }
+ }
+
+ // Charge the allocation to the GC assist budget while the GC is active.
+ if gcBlackenEnabled != 0 {
+ deductAssistCredit(size)
+ }
+
+ // Fixed at generation time: the size class and its element size.
+ const sizeclass = 5
+
+ const elemsize = 48
+
+ mp := acquirem()
+ if doubleCheckMalloc {
+ doubleCheckSmallNoScan(typ, mp)
+ }
+ mp.mallocing = 1
+
+ checkGCTrigger := false
+ c := getMCache(mp)
+ // Span class for this size class with the noscan bit set.
+ const spc = spanClass(sizeclass<<1) | spanClass(1)
+ span := c.alloc[spc]
+
+ // Inlined nextFreeFast: scan the span's cached allocation bitmap for
+ // the next free slot.
+ var nextFreeFastResult gclinkptr
+ if span.allocCache != 0 {
+ theBit := sys.TrailingZeros64(span.allocCache)
+ result := span.freeindex + uint16(theBit)
+ if result < span.nelems {
+ freeidx := result + 1
+ if !(freeidx%64 == 0 && freeidx != span.nelems) {
+ span.allocCache >>= uint(theBit + 1)
+ span.freeindex = freeidx
+ span.allocCount++
+ nextFreeFastResult = gclinkptr(uintptr(result)*
+ 48 +
+ span.base())
+ }
+ }
+ }
+ v := nextFreeFastResult
+ if v == 0 {
+ // Bitmap miss: refill from the mcache (may grow the heap).
+ v, span, checkGCTrigger = c.nextFree(spc)
+ }
+ x := unsafe.Pointer(v)
+ // Zero only when the caller requires it and the span may hold stale data.
+ if needzero && span.needzero != 0 {
+ memclrNoHeapPointers(x, elemsize)
+ }
+
+ // Make any zeroing visible before the pointer is published.
+ publicationBarrier()
+
+ if writeBarrier.enabled {
+ // Mark the new object black; the concurrent GC must not sweep it.
+ gcmarknewobject(span, uintptr(x))
+ } else {
+ // Keep freeIndexForScan in sync so scanning does not treat this
+ // slot as free.
+ span.freeIndexForScan = span.freeindex
+ }
+
+ // Heap-profiling sample countdown.
+ c.nextSample -= int64(elemsize)
+ if c.nextSample < 0 || MemProfileRate != c.memProfRate {
+ profilealloc(mp, x, elemsize)
+ }
+ mp.mallocing = 0
+ releasem(mp)
+
+ // nextFree may have grown the heap; check for a GC trigger.
+ if checkGCTrigger {
+ if t := (gcTrigger{kind: gcTriggerHeap}); t.test() {
+ gcStart(t)
+ }
+ }
+ if gcBlackenEnabled != 0 && elemsize != 0 {
+ if assistG := getg().m.curg; assistG != nil {
+ assistG.gcAssistBytes -= int64(elemsize - size)
+ }
+ }
+
+ if debug.malloc {
+ postMallocgcDebug(x, elemsize, typ)
+ }
+ return x
+}
+
+// mallocgcSmallNoScanSC6 allocates a pointer-free ("noscan") object
+// from size class 6 (64-byte elements). Generated code: a
+// constant-size-class specialization of mallocgc's small-object
+// noscan path.
+func mallocgcSmallNoScanSC6(size uintptr, typ *_type, needzero bool) unsafe.Pointer {
+ if doubleCheckMalloc {
+ if gcphase == _GCmarktermination {
+ throw("mallocgc called with gcphase == _GCmarktermination")
+ }
+ }
+
+ lockRankMayQueueFinalizer()
+
+ if debug.malloc {
+ if x := preMallocgcDebug(size, typ); x != nil {
+ return x
+ }
+ }
+
+ // Charge the allocation to the GC assist budget while the GC is active.
+ if gcBlackenEnabled != 0 {
+ deductAssistCredit(size)
+ }
+
+ // Fixed at generation time: the size class and its element size.
+ const sizeclass = 6
+
+ const elemsize = 64
+
+ mp := acquirem()
+ if doubleCheckMalloc {
+ doubleCheckSmallNoScan(typ, mp)
+ }
+ mp.mallocing = 1
+
+ checkGCTrigger := false
+ c := getMCache(mp)
+ // Span class for this size class with the noscan bit set.
+ const spc = spanClass(sizeclass<<1) | spanClass(1)
+ span := c.alloc[spc]
+
+ // Inlined nextFreeFast: scan the span's cached allocation bitmap for
+ // the next free slot.
+ var nextFreeFastResult gclinkptr
+ if span.allocCache != 0 {
+ theBit := sys.TrailingZeros64(span.allocCache)
+ result := span.freeindex + uint16(theBit)
+ if result < span.nelems {
+ freeidx := result + 1
+ if !(freeidx%64 == 0 && freeidx != span.nelems) {
+ span.allocCache >>= uint(theBit + 1)
+ span.freeindex = freeidx
+ span.allocCount++
+ nextFreeFastResult = gclinkptr(uintptr(result)*
+ 64 +
+ span.base())
+ }
+ }
+ }
+ v := nextFreeFastResult
+ if v == 0 {
+ // Bitmap miss: refill from the mcache (may grow the heap).
+ v, span, checkGCTrigger = c.nextFree(spc)
+ }
+ x := unsafe.Pointer(v)
+ // Zero only when the caller requires it and the span may hold stale data.
+ if needzero && span.needzero != 0 {
+ memclrNoHeapPointers(x, elemsize)
+ }
+
+ // Make any zeroing visible before the pointer is published.
+ publicationBarrier()
+
+ if writeBarrier.enabled {
+ // Mark the new object black; the concurrent GC must not sweep it.
+ gcmarknewobject(span, uintptr(x))
+ } else {
+ // Keep freeIndexForScan in sync so scanning does not treat this
+ // slot as free.
+ span.freeIndexForScan = span.freeindex
+ }
+
+ // Heap-profiling sample countdown.
+ c.nextSample -= int64(elemsize)
+ if c.nextSample < 0 || MemProfileRate != c.memProfRate {
+ profilealloc(mp, x, elemsize)
+ }
+ mp.mallocing = 0
+ releasem(mp)
+
+ // nextFree may have grown the heap; check for a GC trigger.
+ if checkGCTrigger {
+ if t := (gcTrigger{kind: gcTriggerHeap}); t.test() {
+ gcStart(t)
+ }
+ }
+ if gcBlackenEnabled != 0 && elemsize != 0 {
+ if assistG := getg().m.curg; assistG != nil {
+ assistG.gcAssistBytes -= int64(elemsize - size)
+ }
+ }
+
+ if debug.malloc {
+ postMallocgcDebug(x, elemsize, typ)
+ }
+ return x
+}
+
+// mallocgcSmallNoScanSC7 allocates a pointer-free ("noscan") object
+// from size class 7 (80-byte elements). Generated code: a
+// constant-size-class specialization of mallocgc's small-object
+// noscan path.
+func mallocgcSmallNoScanSC7(size uintptr, typ *_type, needzero bool) unsafe.Pointer {
+ if doubleCheckMalloc {
+ if gcphase == _GCmarktermination {
+ throw("mallocgc called with gcphase == _GCmarktermination")
+ }
+ }
+
+ lockRankMayQueueFinalizer()
+
+ if debug.malloc {
+ if x := preMallocgcDebug(size, typ); x != nil {
+ return x
+ }
+ }
+
+ // Charge the allocation to the GC assist budget while the GC is active.
+ if gcBlackenEnabled != 0 {
+ deductAssistCredit(size)
+ }
+
+ // Fixed at generation time: the size class and its element size.
+ const sizeclass = 7
+
+ const elemsize = 80
+
+ mp := acquirem()
+ if doubleCheckMalloc {
+ doubleCheckSmallNoScan(typ, mp)
+ }
+ mp.mallocing = 1
+
+ checkGCTrigger := false
+ c := getMCache(mp)
+ // Span class for this size class with the noscan bit set.
+ const spc = spanClass(sizeclass<<1) | spanClass(1)
+ span := c.alloc[spc]
+
+ // Inlined nextFreeFast: scan the span's cached allocation bitmap for
+ // the next free slot.
+ var nextFreeFastResult gclinkptr
+ if span.allocCache != 0 {
+ theBit := sys.TrailingZeros64(span.allocCache)
+ result := span.freeindex + uint16(theBit)
+ if result < span.nelems {
+ freeidx := result + 1
+ if !(freeidx%64 == 0 && freeidx != span.nelems) {
+ span.allocCache >>= uint(theBit + 1)
+ span.freeindex = freeidx
+ span.allocCount++
+ nextFreeFastResult = gclinkptr(uintptr(result)*
+ 80 +
+ span.base())
+ }
+ }
+ }
+ v := nextFreeFastResult
+ if v == 0 {
+ // Bitmap miss: refill from the mcache (may grow the heap).
+ v, span, checkGCTrigger = c.nextFree(spc)
+ }
+ x := unsafe.Pointer(v)
+ // Zero only when the caller requires it and the span may hold stale data.
+ if needzero && span.needzero != 0 {
+ memclrNoHeapPointers(x, elemsize)
+ }
+
+ // Make any zeroing visible before the pointer is published.
+ publicationBarrier()
+
+ if writeBarrier.enabled {
+ // Mark the new object black; the concurrent GC must not sweep it.
+ gcmarknewobject(span, uintptr(x))
+ } else {
+ // Keep freeIndexForScan in sync so scanning does not treat this
+ // slot as free.
+ span.freeIndexForScan = span.freeindex
+ }
+
+ // Heap-profiling sample countdown.
+ c.nextSample -= int64(elemsize)
+ if c.nextSample < 0 || MemProfileRate != c.memProfRate {
+ profilealloc(mp, x, elemsize)
+ }
+ mp.mallocing = 0
+ releasem(mp)
+
+ // nextFree may have grown the heap; check for a GC trigger.
+ if checkGCTrigger {
+ if t := (gcTrigger{kind: gcTriggerHeap}); t.test() {
+ gcStart(t)
+ }
+ }
+ if gcBlackenEnabled != 0 && elemsize != 0 {
+ if assistG := getg().m.curg; assistG != nil {
+ assistG.gcAssistBytes -= int64(elemsize - size)
+ }
+ }
+
+ if debug.malloc {
+ postMallocgcDebug(x, elemsize, typ)
+ }
+ return x
+}
+
+// mallocgcSmallNoScanSC8 allocates a pointer-free ("noscan") object
+// from size class 8 (96-byte elements). Generated code: a
+// constant-size-class specialization of mallocgc's small-object
+// noscan path.
+func mallocgcSmallNoScanSC8(size uintptr, typ *_type, needzero bool) unsafe.Pointer {
+ if doubleCheckMalloc {
+ if gcphase == _GCmarktermination {
+ throw("mallocgc called with gcphase == _GCmarktermination")
+ }
+ }
+
+ lockRankMayQueueFinalizer()
+
+ if debug.malloc {
+ if x := preMallocgcDebug(size, typ); x != nil {
+ return x
+ }
+ }
+
+ // Charge the allocation to the GC assist budget while the GC is active.
+ if gcBlackenEnabled != 0 {
+ deductAssistCredit(size)
+ }
+
+ // Fixed at generation time: the size class and its element size.
+ const sizeclass = 8
+
+ const elemsize = 96
+
+ mp := acquirem()
+ if doubleCheckMalloc {
+ doubleCheckSmallNoScan(typ, mp)
+ }
+ mp.mallocing = 1
+
+ checkGCTrigger := false
+ c := getMCache(mp)
+ // Span class for this size class with the noscan bit set.
+ const spc = spanClass(sizeclass<<1) | spanClass(1)
+ span := c.alloc[spc]
+
+ // Inlined nextFreeFast: scan the span's cached allocation bitmap for
+ // the next free slot.
+ var nextFreeFastResult gclinkptr
+ if span.allocCache != 0 {
+ theBit := sys.TrailingZeros64(span.allocCache)
+ result := span.freeindex + uint16(theBit)
+ if result < span.nelems {
+ freeidx := result + 1
+ if !(freeidx%64 == 0 && freeidx != span.nelems) {
+ span.allocCache >>= uint(theBit + 1)
+ span.freeindex = freeidx
+ span.allocCount++
+ nextFreeFastResult = gclinkptr(uintptr(result)*
+ 96 +
+ span.base())
+ }
+ }
+ }
+ v := nextFreeFastResult
+ if v == 0 {
+ // Bitmap miss: refill from the mcache (may grow the heap).
+ v, span, checkGCTrigger = c.nextFree(spc)
+ }
+ x := unsafe.Pointer(v)
+ // Zero only when the caller requires it and the span may hold stale data.
+ if needzero && span.needzero != 0 {
+ memclrNoHeapPointers(x, elemsize)
+ }
+
+ // Make any zeroing visible before the pointer is published.
+ publicationBarrier()
+
+ if writeBarrier.enabled {
+ // Mark the new object black; the concurrent GC must not sweep it.
+ gcmarknewobject(span, uintptr(x))
+ } else {
+ // Keep freeIndexForScan in sync so scanning does not treat this
+ // slot as free.
+ span.freeIndexForScan = span.freeindex
+ }
+
+ // Heap-profiling sample countdown.
+ c.nextSample -= int64(elemsize)
+ if c.nextSample < 0 || MemProfileRate != c.memProfRate {
+ profilealloc(mp, x, elemsize)
+ }
+ mp.mallocing = 0
+ releasem(mp)
+
+ // nextFree may have grown the heap; check for a GC trigger.
+ if checkGCTrigger {
+ if t := (gcTrigger{kind: gcTriggerHeap}); t.test() {
+ gcStart(t)
+ }
+ }
+ if gcBlackenEnabled != 0 && elemsize != 0 {
+ if assistG := getg().m.curg; assistG != nil {
+ assistG.gcAssistBytes -= int64(elemsize - size)
+ }
+ }
+
+ if debug.malloc {
+ postMallocgcDebug(x, elemsize, typ)
+ }
+ return x
+}
+
+// mallocgcSmallNoScanSC9 allocates a pointer-free ("noscan") object
+// from size class 9 (112-byte elements). Generated code: a
+// constant-size-class specialization of mallocgc's small-object
+// noscan path.
+func mallocgcSmallNoScanSC9(size uintptr, typ *_type, needzero bool) unsafe.Pointer {
+ if doubleCheckMalloc {
+ if gcphase == _GCmarktermination {
+ throw("mallocgc called with gcphase == _GCmarktermination")
+ }
+ }
+
+ lockRankMayQueueFinalizer()
+
+ if debug.malloc {
+ if x := preMallocgcDebug(size, typ); x != nil {
+ return x
+ }
+ }
+
+ // Charge the allocation to the GC assist budget while the GC is active.
+ if gcBlackenEnabled != 0 {
+ deductAssistCredit(size)
+ }
+
+ // Fixed at generation time: the size class and its element size.
+ const sizeclass = 9
+
+ const elemsize = 112
+
+ mp := acquirem()
+ if doubleCheckMalloc {
+ doubleCheckSmallNoScan(typ, mp)
+ }
+ mp.mallocing = 1
+
+ checkGCTrigger := false
+ c := getMCache(mp)
+ // Span class for this size class with the noscan bit set.
+ const spc = spanClass(sizeclass<<1) | spanClass(1)
+ span := c.alloc[spc]
+
+ // Inlined nextFreeFast: scan the span's cached allocation bitmap for
+ // the next free slot.
+ var nextFreeFastResult gclinkptr
+ if span.allocCache != 0 {
+ theBit := sys.TrailingZeros64(span.allocCache)
+ result := span.freeindex + uint16(theBit)
+ if result < span.nelems {
+ freeidx := result + 1
+ if !(freeidx%64 == 0 && freeidx != span.nelems) {
+ span.allocCache >>= uint(theBit + 1)
+ span.freeindex = freeidx
+ span.allocCount++
+ nextFreeFastResult = gclinkptr(uintptr(result)*
+ 112 +
+ span.base())
+ }
+ }
+ }
+ v := nextFreeFastResult
+ if v == 0 {
+ // Bitmap miss: refill from the mcache (may grow the heap).
+ v, span, checkGCTrigger = c.nextFree(spc)
+ }
+ x := unsafe.Pointer(v)
+ // Zero only when the caller requires it and the span may hold stale data.
+ if needzero && span.needzero != 0 {
+ memclrNoHeapPointers(x, elemsize)
+ }
+
+ // Make any zeroing visible before the pointer is published.
+ publicationBarrier()
+
+ if writeBarrier.enabled {
+ // Mark the new object black; the concurrent GC must not sweep it.
+ gcmarknewobject(span, uintptr(x))
+ } else {
+ // Keep freeIndexForScan in sync so scanning does not treat this
+ // slot as free.
+ span.freeIndexForScan = span.freeindex
+ }
+
+ // Heap-profiling sample countdown.
+ c.nextSample -= int64(elemsize)
+ if c.nextSample < 0 || MemProfileRate != c.memProfRate {
+ profilealloc(mp, x, elemsize)
+ }
+ mp.mallocing = 0
+ releasem(mp)
+
+ // nextFree may have grown the heap; check for a GC trigger.
+ if checkGCTrigger {
+ if t := (gcTrigger{kind: gcTriggerHeap}); t.test() {
+ gcStart(t)
+ }
+ }
+ if gcBlackenEnabled != 0 && elemsize != 0 {
+ if assistG := getg().m.curg; assistG != nil {
+ assistG.gcAssistBytes -= int64(elemsize - size)
+ }
+ }
+
+ if debug.malloc {
+ postMallocgcDebug(x, elemsize, typ)
+ }
+ return x
+}
+
+// mallocgcSmallNoScanSC10 allocates a pointer-free ("noscan") object
+// from size class 10 (128-byte elements). Generated code: a
+// constant-size-class specialization of mallocgc's small-object
+// noscan path.
+func mallocgcSmallNoScanSC10(size uintptr, typ *_type, needzero bool) unsafe.Pointer {
+ if doubleCheckMalloc {
+ if gcphase == _GCmarktermination {
+ throw("mallocgc called with gcphase == _GCmarktermination")
+ }
+ }
+
+ lockRankMayQueueFinalizer()
+
+ if debug.malloc {
+ if x := preMallocgcDebug(size, typ); x != nil {
+ return x
+ }
+ }
+
+ // Charge the allocation to the GC assist budget while the GC is active.
+ if gcBlackenEnabled != 0 {
+ deductAssistCredit(size)
+ }
+
+ // Fixed at generation time: the size class and its element size.
+ const sizeclass = 10
+
+ const elemsize = 128
+
+ mp := acquirem()
+ if doubleCheckMalloc {
+ doubleCheckSmallNoScan(typ, mp)
+ }
+ mp.mallocing = 1
+
+ checkGCTrigger := false
+ c := getMCache(mp)
+ // Span class for this size class with the noscan bit set.
+ const spc = spanClass(sizeclass<<1) | spanClass(1)
+ span := c.alloc[spc]
+
+ // Inlined nextFreeFast: scan the span's cached allocation bitmap for
+ // the next free slot.
+ var nextFreeFastResult gclinkptr
+ if span.allocCache != 0 {
+ theBit := sys.TrailingZeros64(span.allocCache)
+ result := span.freeindex + uint16(theBit)
+ if result < span.nelems {
+ freeidx := result + 1
+ if !(freeidx%64 == 0 && freeidx != span.nelems) {
+ span.allocCache >>= uint(theBit + 1)
+ span.freeindex = freeidx
+ span.allocCount++
+ nextFreeFastResult = gclinkptr(uintptr(result)*
+ 128 +
+ span.base())
+ }
+ }
+ }
+ v := nextFreeFastResult
+ if v == 0 {
+ // Bitmap miss: refill from the mcache (may grow the heap).
+ v, span, checkGCTrigger = c.nextFree(spc)
+ }
+ x := unsafe.Pointer(v)
+ // Zero only when the caller requires it and the span may hold stale data.
+ if needzero && span.needzero != 0 {
+ memclrNoHeapPointers(x, elemsize)
+ }
+
+ // Make any zeroing visible before the pointer is published.
+ publicationBarrier()
+
+ if writeBarrier.enabled {
+ // Mark the new object black; the concurrent GC must not sweep it.
+ gcmarknewobject(span, uintptr(x))
+ } else {
+ // Keep freeIndexForScan in sync so scanning does not treat this
+ // slot as free.
+ span.freeIndexForScan = span.freeindex
+ }
+
+ // Heap-profiling sample countdown.
+ c.nextSample -= int64(elemsize)
+ if c.nextSample < 0 || MemProfileRate != c.memProfRate {
+ profilealloc(mp, x, elemsize)
+ }
+ mp.mallocing = 0
+ releasem(mp)
+
+ // nextFree may have grown the heap; check for a GC trigger.
+ if checkGCTrigger {
+ if t := (gcTrigger{kind: gcTriggerHeap}); t.test() {
+ gcStart(t)
+ }
+ }
+ if gcBlackenEnabled != 0 && elemsize != 0 {
+ if assistG := getg().m.curg; assistG != nil {
+ assistG.gcAssistBytes -= int64(elemsize - size)
+ }
+ }
+
+ if debug.malloc {
+ postMallocgcDebug(x, elemsize, typ)
+ }
+ return x
+}
+
+// mallocgcSmallNoScanSC11 is a generated specialization of the small,
+// pointer-free (noscan) malloc path for size class 11 (element size 144
+// bytes). size is presumably <= 144 — enforced by the dispatching caller,
+// not re-checked here. The returned slot is zeroed only when needzero is
+// set and the span still has dirty memory.
+func mallocgcSmallNoScanSC11(size uintptr, typ *_type, needzero bool) unsafe.Pointer {
+ if doubleCheckMalloc {
+ if gcphase == _GCmarktermination {
+ throw("mallocgc called with gcphase == _GCmarktermination")
+ }
+ }
+
+ lockRankMayQueueFinalizer()
+
+ if debug.malloc {
+ if x := preMallocgcDebug(size, typ); x != nil {
+ return x
+ }
+ }
+
+ // While the GC is marking, allocations pay an assist tax up front.
+ if gcBlackenEnabled != 0 {
+ deductAssistCredit(size)
+ }
+
+ const sizeclass = 11
+
+ const elemsize = 144
+
+ mp := acquirem()
+ if doubleCheckMalloc {
+ doubleCheckSmallNoScan(typ, mp)
+ }
+ mp.mallocing = 1
+
+ checkGCTrigger := false
+ c := getMCache(mp)
+ const spc = spanClass(sizeclass<<1) | spanClass(1)
+ span := c.alloc[spc]
+
+ // Fast path (inlined nextFreeFast): pull the next free slot out of the
+ // span's 64-bit allocCache bitmap, if it has one.
+ var nextFreeFastResult gclinkptr
+ if span.allocCache != 0 {
+ theBit := sys.TrailingZeros64(span.allocCache)
+ result := span.freeindex + uint16(theBit)
+ if result < span.nelems {
+ freeidx := result + 1
+ // Skip the fast path at 64-slot boundaries, where allocCache needs a refill.
+ if !(freeidx%64 == 0 && freeidx != span.nelems) {
+ span.allocCache >>= uint(theBit + 1)
+ span.freeindex = freeidx
+ span.allocCount++
+ nextFreeFastResult = gclinkptr(uintptr(result)*
+ 144 +
+ span.base())
+ }
+ }
+ }
+ v := nextFreeFastResult
+ // Slow path: refill the cache or fetch a fresh span; may request a GC trigger check.
+ if v == 0 {
+ v, span, checkGCTrigger = c.nextFree(spc)
+ }
+ x := unsafe.Pointer(v)
+ if needzero && span.needzero != 0 {
+ memclrNoHeapPointers(x, elemsize)
+ }
+
+ // Publish the zeroing above before the new pointer can become visible.
+ publicationBarrier()
+
+ if writeBarrier.enabled {
+
+ // GC is active: mark the new object so the current cycle treats it as live.
+ gcmarknewobject(span, uintptr(x))
+ } else {
+
+ // GC idle: just record the new allocation frontier for the scanner.
+ span.freeIndexForScan = span.freeindex
+ }
+
+ // Heap profiling: sample when the byte budget is spent or the rate changed.
+ c.nextSample -= int64(elemsize)
+ if c.nextSample < 0 || MemProfileRate != c.memProfRate {
+ profilealloc(mp, x, elemsize)
+ }
+ mp.mallocing = 0
+ releasem(mp)
+
+ if checkGCTrigger {
+ if t := (gcTrigger{kind: gcTriggerHeap}); t.test() {
+ gcStart(t)
+ }
+ }
+ // Charge the assist debt for size-class rounding (elemsize - size bytes).
+ if gcBlackenEnabled != 0 && elemsize != 0 {
+ if assistG := getg().m.curg; assistG != nil {
+ assistG.gcAssistBytes -= int64(elemsize - size)
+ }
+ }
+
+ if debug.malloc {
+ postMallocgcDebug(x, elemsize, typ)
+ }
+ return x
+}
+
+// mallocgcSmallNoScanSC12 is a generated specialization of the small,
+// pointer-free (noscan) malloc path for size class 12 (element size 160
+// bytes). size is presumably <= 160 — enforced by the dispatching caller,
+// not re-checked here. The returned slot is zeroed only when needzero is
+// set and the span still has dirty memory.
+func mallocgcSmallNoScanSC12(size uintptr, typ *_type, needzero bool) unsafe.Pointer {
+ if doubleCheckMalloc {
+ if gcphase == _GCmarktermination {
+ throw("mallocgc called with gcphase == _GCmarktermination")
+ }
+ }
+
+ lockRankMayQueueFinalizer()
+
+ if debug.malloc {
+ if x := preMallocgcDebug(size, typ); x != nil {
+ return x
+ }
+ }
+
+ // While the GC is marking, allocations pay an assist tax up front.
+ if gcBlackenEnabled != 0 {
+ deductAssistCredit(size)
+ }
+
+ const sizeclass = 12
+
+ const elemsize = 160
+
+ mp := acquirem()
+ if doubleCheckMalloc {
+ doubleCheckSmallNoScan(typ, mp)
+ }
+ mp.mallocing = 1
+
+ checkGCTrigger := false
+ c := getMCache(mp)
+ const spc = spanClass(sizeclass<<1) | spanClass(1)
+ span := c.alloc[spc]
+
+ // Fast path (inlined nextFreeFast): pull the next free slot out of the
+ // span's 64-bit allocCache bitmap, if it has one.
+ var nextFreeFastResult gclinkptr
+ if span.allocCache != 0 {
+ theBit := sys.TrailingZeros64(span.allocCache)
+ result := span.freeindex + uint16(theBit)
+ if result < span.nelems {
+ freeidx := result + 1
+ // Skip the fast path at 64-slot boundaries, where allocCache needs a refill.
+ if !(freeidx%64 == 0 && freeidx != span.nelems) {
+ span.allocCache >>= uint(theBit + 1)
+ span.freeindex = freeidx
+ span.allocCount++
+ nextFreeFastResult = gclinkptr(uintptr(result)*
+ 160 +
+ span.base())
+ }
+ }
+ }
+ v := nextFreeFastResult
+ // Slow path: refill the cache or fetch a fresh span; may request a GC trigger check.
+ if v == 0 {
+ v, span, checkGCTrigger = c.nextFree(spc)
+ }
+ x := unsafe.Pointer(v)
+ if needzero && span.needzero != 0 {
+ memclrNoHeapPointers(x, elemsize)
+ }
+
+ // Publish the zeroing above before the new pointer can become visible.
+ publicationBarrier()
+
+ if writeBarrier.enabled {
+
+ // GC is active: mark the new object so the current cycle treats it as live.
+ gcmarknewobject(span, uintptr(x))
+ } else {
+
+ // GC idle: just record the new allocation frontier for the scanner.
+ span.freeIndexForScan = span.freeindex
+ }
+
+ // Heap profiling: sample when the byte budget is spent or the rate changed.
+ c.nextSample -= int64(elemsize)
+ if c.nextSample < 0 || MemProfileRate != c.memProfRate {
+ profilealloc(mp, x, elemsize)
+ }
+ mp.mallocing = 0
+ releasem(mp)
+
+ if checkGCTrigger {
+ if t := (gcTrigger{kind: gcTriggerHeap}); t.test() {
+ gcStart(t)
+ }
+ }
+ // Charge the assist debt for size-class rounding (elemsize - size bytes).
+ if gcBlackenEnabled != 0 && elemsize != 0 {
+ if assistG := getg().m.curg; assistG != nil {
+ assistG.gcAssistBytes -= int64(elemsize - size)
+ }
+ }
+
+ if debug.malloc {
+ postMallocgcDebug(x, elemsize, typ)
+ }
+ return x
+}
+
+// mallocgcSmallNoScanSC13 is a generated specialization of the small,
+// pointer-free (noscan) malloc path for size class 13 (element size 176
+// bytes). size is presumably <= 176 — enforced by the dispatching caller,
+// not re-checked here. The returned slot is zeroed only when needzero is
+// set and the span still has dirty memory.
+func mallocgcSmallNoScanSC13(size uintptr, typ *_type, needzero bool) unsafe.Pointer {
+ if doubleCheckMalloc {
+ if gcphase == _GCmarktermination {
+ throw("mallocgc called with gcphase == _GCmarktermination")
+ }
+ }
+
+ lockRankMayQueueFinalizer()
+
+ if debug.malloc {
+ if x := preMallocgcDebug(size, typ); x != nil {
+ return x
+ }
+ }
+
+ // While the GC is marking, allocations pay an assist tax up front.
+ if gcBlackenEnabled != 0 {
+ deductAssistCredit(size)
+ }
+
+ const sizeclass = 13
+
+ const elemsize = 176
+
+ mp := acquirem()
+ if doubleCheckMalloc {
+ doubleCheckSmallNoScan(typ, mp)
+ }
+ mp.mallocing = 1
+
+ checkGCTrigger := false
+ c := getMCache(mp)
+ const spc = spanClass(sizeclass<<1) | spanClass(1)
+ span := c.alloc[spc]
+
+ // Fast path (inlined nextFreeFast): pull the next free slot out of the
+ // span's 64-bit allocCache bitmap, if it has one.
+ var nextFreeFastResult gclinkptr
+ if span.allocCache != 0 {
+ theBit := sys.TrailingZeros64(span.allocCache)
+ result := span.freeindex + uint16(theBit)
+ if result < span.nelems {
+ freeidx := result + 1
+ // Skip the fast path at 64-slot boundaries, where allocCache needs a refill.
+ if !(freeidx%64 == 0 && freeidx != span.nelems) {
+ span.allocCache >>= uint(theBit + 1)
+ span.freeindex = freeidx
+ span.allocCount++
+ nextFreeFastResult = gclinkptr(uintptr(result)*
+ 176 +
+ span.base())
+ }
+ }
+ }
+ v := nextFreeFastResult
+ // Slow path: refill the cache or fetch a fresh span; may request a GC trigger check.
+ if v == 0 {
+ v, span, checkGCTrigger = c.nextFree(spc)
+ }
+ x := unsafe.Pointer(v)
+ if needzero && span.needzero != 0 {
+ memclrNoHeapPointers(x, elemsize)
+ }
+
+ // Publish the zeroing above before the new pointer can become visible.
+ publicationBarrier()
+
+ if writeBarrier.enabled {
+
+ // GC is active: mark the new object so the current cycle treats it as live.
+ gcmarknewobject(span, uintptr(x))
+ } else {
+
+ // GC idle: just record the new allocation frontier for the scanner.
+ span.freeIndexForScan = span.freeindex
+ }
+
+ // Heap profiling: sample when the byte budget is spent or the rate changed.
+ c.nextSample -= int64(elemsize)
+ if c.nextSample < 0 || MemProfileRate != c.memProfRate {
+ profilealloc(mp, x, elemsize)
+ }
+ mp.mallocing = 0
+ releasem(mp)
+
+ if checkGCTrigger {
+ if t := (gcTrigger{kind: gcTriggerHeap}); t.test() {
+ gcStart(t)
+ }
+ }
+ // Charge the assist debt for size-class rounding (elemsize - size bytes).
+ if gcBlackenEnabled != 0 && elemsize != 0 {
+ if assistG := getg().m.curg; assistG != nil {
+ assistG.gcAssistBytes -= int64(elemsize - size)
+ }
+ }
+
+ if debug.malloc {
+ postMallocgcDebug(x, elemsize, typ)
+ }
+ return x
+}
+
+// mallocgcSmallNoScanSC14 is a generated specialization of the small,
+// pointer-free (noscan) malloc path for size class 14 (element size 192
+// bytes). size is presumably <= 192 — enforced by the dispatching caller,
+// not re-checked here. The returned slot is zeroed only when needzero is
+// set and the span still has dirty memory.
+func mallocgcSmallNoScanSC14(size uintptr, typ *_type, needzero bool) unsafe.Pointer {
+ if doubleCheckMalloc {
+ if gcphase == _GCmarktermination {
+ throw("mallocgc called with gcphase == _GCmarktermination")
+ }
+ }
+
+ lockRankMayQueueFinalizer()
+
+ if debug.malloc {
+ if x := preMallocgcDebug(size, typ); x != nil {
+ return x
+ }
+ }
+
+ // While the GC is marking, allocations pay an assist tax up front.
+ if gcBlackenEnabled != 0 {
+ deductAssistCredit(size)
+ }
+
+ const sizeclass = 14
+
+ const elemsize = 192
+
+ mp := acquirem()
+ if doubleCheckMalloc {
+ doubleCheckSmallNoScan(typ, mp)
+ }
+ mp.mallocing = 1
+
+ checkGCTrigger := false
+ c := getMCache(mp)
+ const spc = spanClass(sizeclass<<1) | spanClass(1)
+ span := c.alloc[spc]
+
+ // Fast path (inlined nextFreeFast): pull the next free slot out of the
+ // span's 64-bit allocCache bitmap, if it has one.
+ var nextFreeFastResult gclinkptr
+ if span.allocCache != 0 {
+ theBit := sys.TrailingZeros64(span.allocCache)
+ result := span.freeindex + uint16(theBit)
+ if result < span.nelems {
+ freeidx := result + 1
+ // Skip the fast path at 64-slot boundaries, where allocCache needs a refill.
+ if !(freeidx%64 == 0 && freeidx != span.nelems) {
+ span.allocCache >>= uint(theBit + 1)
+ span.freeindex = freeidx
+ span.allocCount++
+ nextFreeFastResult = gclinkptr(uintptr(result)*
+ 192 +
+ span.base())
+ }
+ }
+ }
+ v := nextFreeFastResult
+ // Slow path: refill the cache or fetch a fresh span; may request a GC trigger check.
+ if v == 0 {
+ v, span, checkGCTrigger = c.nextFree(spc)
+ }
+ x := unsafe.Pointer(v)
+ if needzero && span.needzero != 0 {
+ memclrNoHeapPointers(x, elemsize)
+ }
+
+ // Publish the zeroing above before the new pointer can become visible.
+ publicationBarrier()
+
+ if writeBarrier.enabled {
+
+ // GC is active: mark the new object so the current cycle treats it as live.
+ gcmarknewobject(span, uintptr(x))
+ } else {
+
+ // GC idle: just record the new allocation frontier for the scanner.
+ span.freeIndexForScan = span.freeindex
+ }
+
+ // Heap profiling: sample when the byte budget is spent or the rate changed.
+ c.nextSample -= int64(elemsize)
+ if c.nextSample < 0 || MemProfileRate != c.memProfRate {
+ profilealloc(mp, x, elemsize)
+ }
+ mp.mallocing = 0
+ releasem(mp)
+
+ if checkGCTrigger {
+ if t := (gcTrigger{kind: gcTriggerHeap}); t.test() {
+ gcStart(t)
+ }
+ }
+ // Charge the assist debt for size-class rounding (elemsize - size bytes).
+ if gcBlackenEnabled != 0 && elemsize != 0 {
+ if assistG := getg().m.curg; assistG != nil {
+ assistG.gcAssistBytes -= int64(elemsize - size)
+ }
+ }
+
+ if debug.malloc {
+ postMallocgcDebug(x, elemsize, typ)
+ }
+ return x
+}
+
+// mallocgcSmallNoScanSC15 is a generated specialization of the small,
+// pointer-free (noscan) malloc path for size class 15 (element size 208
+// bytes). size is presumably <= 208 — enforced by the dispatching caller,
+// not re-checked here. The returned slot is zeroed only when needzero is
+// set and the span still has dirty memory.
+func mallocgcSmallNoScanSC15(size uintptr, typ *_type, needzero bool) unsafe.Pointer {
+ if doubleCheckMalloc {
+ if gcphase == _GCmarktermination {
+ throw("mallocgc called with gcphase == _GCmarktermination")
+ }
+ }
+
+ lockRankMayQueueFinalizer()
+
+ if debug.malloc {
+ if x := preMallocgcDebug(size, typ); x != nil {
+ return x
+ }
+ }
+
+ // While the GC is marking, allocations pay an assist tax up front.
+ if gcBlackenEnabled != 0 {
+ deductAssistCredit(size)
+ }
+
+ const sizeclass = 15
+
+ const elemsize = 208
+
+ mp := acquirem()
+ if doubleCheckMalloc {
+ doubleCheckSmallNoScan(typ, mp)
+ }
+ mp.mallocing = 1
+
+ checkGCTrigger := false
+ c := getMCache(mp)
+ const spc = spanClass(sizeclass<<1) | spanClass(1)
+ span := c.alloc[spc]
+
+ // Fast path (inlined nextFreeFast): pull the next free slot out of the
+ // span's 64-bit allocCache bitmap, if it has one.
+ var nextFreeFastResult gclinkptr
+ if span.allocCache != 0 {
+ theBit := sys.TrailingZeros64(span.allocCache)
+ result := span.freeindex + uint16(theBit)
+ if result < span.nelems {
+ freeidx := result + 1
+ // Skip the fast path at 64-slot boundaries, where allocCache needs a refill.
+ if !(freeidx%64 == 0 && freeidx != span.nelems) {
+ span.allocCache >>= uint(theBit + 1)
+ span.freeindex = freeidx
+ span.allocCount++
+ nextFreeFastResult = gclinkptr(uintptr(result)*
+ 208 +
+ span.base())
+ }
+ }
+ }
+ v := nextFreeFastResult
+ // Slow path: refill the cache or fetch a fresh span; may request a GC trigger check.
+ if v == 0 {
+ v, span, checkGCTrigger = c.nextFree(spc)
+ }
+ x := unsafe.Pointer(v)
+ if needzero && span.needzero != 0 {
+ memclrNoHeapPointers(x, elemsize)
+ }
+
+ // Publish the zeroing above before the new pointer can become visible.
+ publicationBarrier()
+
+ if writeBarrier.enabled {
+
+ // GC is active: mark the new object so the current cycle treats it as live.
+ gcmarknewobject(span, uintptr(x))
+ } else {
+
+ // GC idle: just record the new allocation frontier for the scanner.
+ span.freeIndexForScan = span.freeindex
+ }
+
+ // Heap profiling: sample when the byte budget is spent or the rate changed.
+ c.nextSample -= int64(elemsize)
+ if c.nextSample < 0 || MemProfileRate != c.memProfRate {
+ profilealloc(mp, x, elemsize)
+ }
+ mp.mallocing = 0
+ releasem(mp)
+
+ if checkGCTrigger {
+ if t := (gcTrigger{kind: gcTriggerHeap}); t.test() {
+ gcStart(t)
+ }
+ }
+ // Charge the assist debt for size-class rounding (elemsize - size bytes).
+ if gcBlackenEnabled != 0 && elemsize != 0 {
+ if assistG := getg().m.curg; assistG != nil {
+ assistG.gcAssistBytes -= int64(elemsize - size)
+ }
+ }
+
+ if debug.malloc {
+ postMallocgcDebug(x, elemsize, typ)
+ }
+ return x
+}
+
+// mallocgcSmallNoScanSC16 is a generated specialization of the small,
+// pointer-free (noscan) malloc path for size class 16 (element size 224
+// bytes). size is presumably <= 224 — enforced by the dispatching caller,
+// not re-checked here. The returned slot is zeroed only when needzero is
+// set and the span still has dirty memory.
+func mallocgcSmallNoScanSC16(size uintptr, typ *_type, needzero bool) unsafe.Pointer {
+ if doubleCheckMalloc {
+ if gcphase == _GCmarktermination {
+ throw("mallocgc called with gcphase == _GCmarktermination")
+ }
+ }
+
+ lockRankMayQueueFinalizer()
+
+ if debug.malloc {
+ if x := preMallocgcDebug(size, typ); x != nil {
+ return x
+ }
+ }
+
+ // While the GC is marking, allocations pay an assist tax up front.
+ if gcBlackenEnabled != 0 {
+ deductAssistCredit(size)
+ }
+
+ const sizeclass = 16
+
+ const elemsize = 224
+
+ mp := acquirem()
+ if doubleCheckMalloc {
+ doubleCheckSmallNoScan(typ, mp)
+ }
+ mp.mallocing = 1
+
+ checkGCTrigger := false
+ c := getMCache(mp)
+ const spc = spanClass(sizeclass<<1) | spanClass(1)
+ span := c.alloc[spc]
+
+ // Fast path (inlined nextFreeFast): pull the next free slot out of the
+ // span's 64-bit allocCache bitmap, if it has one.
+ var nextFreeFastResult gclinkptr
+ if span.allocCache != 0 {
+ theBit := sys.TrailingZeros64(span.allocCache)
+ result := span.freeindex + uint16(theBit)
+ if result < span.nelems {
+ freeidx := result + 1
+ // Skip the fast path at 64-slot boundaries, where allocCache needs a refill.
+ if !(freeidx%64 == 0 && freeidx != span.nelems) {
+ span.allocCache >>= uint(theBit + 1)
+ span.freeindex = freeidx
+ span.allocCount++
+ nextFreeFastResult = gclinkptr(uintptr(result)*
+ 224 +
+ span.base())
+ }
+ }
+ }
+ v := nextFreeFastResult
+ // Slow path: refill the cache or fetch a fresh span; may request a GC trigger check.
+ if v == 0 {
+ v, span, checkGCTrigger = c.nextFree(spc)
+ }
+ x := unsafe.Pointer(v)
+ if needzero && span.needzero != 0 {
+ memclrNoHeapPointers(x, elemsize)
+ }
+
+ // Publish the zeroing above before the new pointer can become visible.
+ publicationBarrier()
+
+ if writeBarrier.enabled {
+
+ // GC is active: mark the new object so the current cycle treats it as live.
+ gcmarknewobject(span, uintptr(x))
+ } else {
+
+ // GC idle: just record the new allocation frontier for the scanner.
+ span.freeIndexForScan = span.freeindex
+ }
+
+ // Heap profiling: sample when the byte budget is spent or the rate changed.
+ c.nextSample -= int64(elemsize)
+ if c.nextSample < 0 || MemProfileRate != c.memProfRate {
+ profilealloc(mp, x, elemsize)
+ }
+ mp.mallocing = 0
+ releasem(mp)
+
+ if checkGCTrigger {
+ if t := (gcTrigger{kind: gcTriggerHeap}); t.test() {
+ gcStart(t)
+ }
+ }
+ // Charge the assist debt for size-class rounding (elemsize - size bytes).
+ if gcBlackenEnabled != 0 && elemsize != 0 {
+ if assistG := getg().m.curg; assistG != nil {
+ assistG.gcAssistBytes -= int64(elemsize - size)
+ }
+ }
+
+ if debug.malloc {
+ postMallocgcDebug(x, elemsize, typ)
+ }
+ return x
+}
+
+// mallocgcSmallNoScanSC17 is a generated specialization of the small,
+// pointer-free (noscan) malloc path for size class 17 (element size 240
+// bytes). size is presumably <= 240 — enforced by the dispatching caller,
+// not re-checked here. The returned slot is zeroed only when needzero is
+// set and the span still has dirty memory.
+func mallocgcSmallNoScanSC17(size uintptr, typ *_type, needzero bool) unsafe.Pointer {
+ if doubleCheckMalloc {
+ if gcphase == _GCmarktermination {
+ throw("mallocgc called with gcphase == _GCmarktermination")
+ }
+ }
+
+ lockRankMayQueueFinalizer()
+
+ if debug.malloc {
+ if x := preMallocgcDebug(size, typ); x != nil {
+ return x
+ }
+ }
+
+ // While the GC is marking, allocations pay an assist tax up front.
+ if gcBlackenEnabled != 0 {
+ deductAssistCredit(size)
+ }
+
+ const sizeclass = 17
+
+ const elemsize = 240
+
+ mp := acquirem()
+ if doubleCheckMalloc {
+ doubleCheckSmallNoScan(typ, mp)
+ }
+ mp.mallocing = 1
+
+ checkGCTrigger := false
+ c := getMCache(mp)
+ const spc = spanClass(sizeclass<<1) | spanClass(1)
+ span := c.alloc[spc]
+
+ // Fast path (inlined nextFreeFast): pull the next free slot out of the
+ // span's 64-bit allocCache bitmap, if it has one.
+ var nextFreeFastResult gclinkptr
+ if span.allocCache != 0 {
+ theBit := sys.TrailingZeros64(span.allocCache)
+ result := span.freeindex + uint16(theBit)
+ if result < span.nelems {
+ freeidx := result + 1
+ // Skip the fast path at 64-slot boundaries, where allocCache needs a refill.
+ if !(freeidx%64 == 0 && freeidx != span.nelems) {
+ span.allocCache >>= uint(theBit + 1)
+ span.freeindex = freeidx
+ span.allocCount++
+ nextFreeFastResult = gclinkptr(uintptr(result)*
+ 240 +
+ span.base())
+ }
+ }
+ }
+ v := nextFreeFastResult
+ // Slow path: refill the cache or fetch a fresh span; may request a GC trigger check.
+ if v == 0 {
+ v, span, checkGCTrigger = c.nextFree(spc)
+ }
+ x := unsafe.Pointer(v)
+ if needzero && span.needzero != 0 {
+ memclrNoHeapPointers(x, elemsize)
+ }
+
+ // Publish the zeroing above before the new pointer can become visible.
+ publicationBarrier()
+
+ if writeBarrier.enabled {
+
+ // GC is active: mark the new object so the current cycle treats it as live.
+ gcmarknewobject(span, uintptr(x))
+ } else {
+
+ // GC idle: just record the new allocation frontier for the scanner.
+ span.freeIndexForScan = span.freeindex
+ }
+
+ // Heap profiling: sample when the byte budget is spent or the rate changed.
+ c.nextSample -= int64(elemsize)
+ if c.nextSample < 0 || MemProfileRate != c.memProfRate {
+ profilealloc(mp, x, elemsize)
+ }
+ mp.mallocing = 0
+ releasem(mp)
+
+ if checkGCTrigger {
+ if t := (gcTrigger{kind: gcTriggerHeap}); t.test() {
+ gcStart(t)
+ }
+ }
+ // Charge the assist debt for size-class rounding (elemsize - size bytes).
+ if gcBlackenEnabled != 0 && elemsize != 0 {
+ if assistG := getg().m.curg; assistG != nil {
+ assistG.gcAssistBytes -= int64(elemsize - size)
+ }
+ }
+
+ if debug.malloc {
+ postMallocgcDebug(x, elemsize, typ)
+ }
+ return x
+}
+
+// mallocgcSmallNoScanSC18 is a generated specialization of the small,
+// pointer-free (noscan) malloc path for size class 18 (element size 256
+// bytes). size is presumably <= 256 — enforced by the dispatching caller,
+// not re-checked here. The returned slot is zeroed only when needzero is
+// set and the span still has dirty memory.
+func mallocgcSmallNoScanSC18(size uintptr, typ *_type, needzero bool) unsafe.Pointer {
+ if doubleCheckMalloc {
+ if gcphase == _GCmarktermination {
+ throw("mallocgc called with gcphase == _GCmarktermination")
+ }
+ }
+
+ lockRankMayQueueFinalizer()
+
+ if debug.malloc {
+ if x := preMallocgcDebug(size, typ); x != nil {
+ return x
+ }
+ }
+
+ // While the GC is marking, allocations pay an assist tax up front.
+ if gcBlackenEnabled != 0 {
+ deductAssistCredit(size)
+ }
+
+ const sizeclass = 18
+
+ const elemsize = 256
+
+ mp := acquirem()
+ if doubleCheckMalloc {
+ doubleCheckSmallNoScan(typ, mp)
+ }
+ mp.mallocing = 1
+
+ checkGCTrigger := false
+ c := getMCache(mp)
+ const spc = spanClass(sizeclass<<1) | spanClass(1)
+ span := c.alloc[spc]
+
+ // Fast path (inlined nextFreeFast): pull the next free slot out of the
+ // span's 64-bit allocCache bitmap, if it has one.
+ var nextFreeFastResult gclinkptr
+ if span.allocCache != 0 {
+ theBit := sys.TrailingZeros64(span.allocCache)
+ result := span.freeindex + uint16(theBit)
+ if result < span.nelems {
+ freeidx := result + 1
+ // Skip the fast path at 64-slot boundaries, where allocCache needs a refill.
+ if !(freeidx%64 == 0 && freeidx != span.nelems) {
+ span.allocCache >>= uint(theBit + 1)
+ span.freeindex = freeidx
+ span.allocCount++
+ nextFreeFastResult = gclinkptr(uintptr(result)*
+ 256 +
+ span.base())
+ }
+ }
+ }
+ v := nextFreeFastResult
+ // Slow path: refill the cache or fetch a fresh span; may request a GC trigger check.
+ if v == 0 {
+ v, span, checkGCTrigger = c.nextFree(spc)
+ }
+ x := unsafe.Pointer(v)
+ if needzero && span.needzero != 0 {
+ memclrNoHeapPointers(x, elemsize)
+ }
+
+ // Publish the zeroing above before the new pointer can become visible.
+ publicationBarrier()
+
+ if writeBarrier.enabled {
+
+ // GC is active: mark the new object so the current cycle treats it as live.
+ gcmarknewobject(span, uintptr(x))
+ } else {
+
+ // GC idle: just record the new allocation frontier for the scanner.
+ span.freeIndexForScan = span.freeindex
+ }
+
+ // Heap profiling: sample when the byte budget is spent or the rate changed.
+ c.nextSample -= int64(elemsize)
+ if c.nextSample < 0 || MemProfileRate != c.memProfRate {
+ profilealloc(mp, x, elemsize)
+ }
+ mp.mallocing = 0
+ releasem(mp)
+
+ if checkGCTrigger {
+ if t := (gcTrigger{kind: gcTriggerHeap}); t.test() {
+ gcStart(t)
+ }
+ }
+ // Charge the assist debt for size-class rounding (elemsize - size bytes).
+ if gcBlackenEnabled != 0 && elemsize != 0 {
+ if assistG := getg().m.curg; assistG != nil {
+ assistG.gcAssistBytes -= int64(elemsize - size)
+ }
+ }
+
+ if debug.malloc {
+ postMallocgcDebug(x, elemsize, typ)
+ }
+ return x
+}
+
+// mallocgcSmallNoScanSC19 is a generated specialization of the small,
+// pointer-free (noscan) malloc path for size class 19 (element size 288
+// bytes). size is presumably <= 288 — enforced by the dispatching caller,
+// not re-checked here. The returned slot is zeroed only when needzero is
+// set and the span still has dirty memory.
+func mallocgcSmallNoScanSC19(size uintptr, typ *_type, needzero bool) unsafe.Pointer {
+ if doubleCheckMalloc {
+ if gcphase == _GCmarktermination {
+ throw("mallocgc called with gcphase == _GCmarktermination")
+ }
+ }
+
+ lockRankMayQueueFinalizer()
+
+ if debug.malloc {
+ if x := preMallocgcDebug(size, typ); x != nil {
+ return x
+ }
+ }
+
+ // While the GC is marking, allocations pay an assist tax up front.
+ if gcBlackenEnabled != 0 {
+ deductAssistCredit(size)
+ }
+
+ const sizeclass = 19
+
+ const elemsize = 288
+
+ mp := acquirem()
+ if doubleCheckMalloc {
+ doubleCheckSmallNoScan(typ, mp)
+ }
+ mp.mallocing = 1
+
+ checkGCTrigger := false
+ c := getMCache(mp)
+ const spc = spanClass(sizeclass<<1) | spanClass(1)
+ span := c.alloc[spc]
+
+ // Fast path (inlined nextFreeFast): pull the next free slot out of the
+ // span's 64-bit allocCache bitmap, if it has one.
+ var nextFreeFastResult gclinkptr
+ if span.allocCache != 0 {
+ theBit := sys.TrailingZeros64(span.allocCache)
+ result := span.freeindex + uint16(theBit)
+ if result < span.nelems {
+ freeidx := result + 1
+ // Skip the fast path at 64-slot boundaries, where allocCache needs a refill.
+ if !(freeidx%64 == 0 && freeidx != span.nelems) {
+ span.allocCache >>= uint(theBit + 1)
+ span.freeindex = freeidx
+ span.allocCount++
+ nextFreeFastResult = gclinkptr(uintptr(result)*
+ 288 +
+ span.base())
+ }
+ }
+ }
+ v := nextFreeFastResult
+ // Slow path: refill the cache or fetch a fresh span; may request a GC trigger check.
+ if v == 0 {
+ v, span, checkGCTrigger = c.nextFree(spc)
+ }
+ x := unsafe.Pointer(v)
+ if needzero && span.needzero != 0 {
+ memclrNoHeapPointers(x, elemsize)
+ }
+
+ // Publish the zeroing above before the new pointer can become visible.
+ publicationBarrier()
+
+ if writeBarrier.enabled {
+
+ // GC is active: mark the new object so the current cycle treats it as live.
+ gcmarknewobject(span, uintptr(x))
+ } else {
+
+ // GC idle: just record the new allocation frontier for the scanner.
+ span.freeIndexForScan = span.freeindex
+ }
+
+ // Heap profiling: sample when the byte budget is spent or the rate changed.
+ c.nextSample -= int64(elemsize)
+ if c.nextSample < 0 || MemProfileRate != c.memProfRate {
+ profilealloc(mp, x, elemsize)
+ }
+ mp.mallocing = 0
+ releasem(mp)
+
+ if checkGCTrigger {
+ if t := (gcTrigger{kind: gcTriggerHeap}); t.test() {
+ gcStart(t)
+ }
+ }
+ // Charge the assist debt for size-class rounding (elemsize - size bytes).
+ if gcBlackenEnabled != 0 && elemsize != 0 {
+ if assistG := getg().m.curg; assistG != nil {
+ assistG.gcAssistBytes -= int64(elemsize - size)
+ }
+ }
+
+ if debug.malloc {
+ postMallocgcDebug(x, elemsize, typ)
+ }
+ return x
+}
+
+// mallocgcSmallNoScanSC20 is a generated specialization of the small,
+// pointer-free (noscan) malloc path for size class 20 (element size 320
+// bytes). size is presumably <= 320 — enforced by the dispatching caller,
+// not re-checked here. The returned slot is zeroed only when needzero is
+// set and the span still has dirty memory.
+func mallocgcSmallNoScanSC20(size uintptr, typ *_type, needzero bool) unsafe.Pointer {
+ if doubleCheckMalloc {
+ if gcphase == _GCmarktermination {
+ throw("mallocgc called with gcphase == _GCmarktermination")
+ }
+ }
+
+ lockRankMayQueueFinalizer()
+
+ if debug.malloc {
+ if x := preMallocgcDebug(size, typ); x != nil {
+ return x
+ }
+ }
+
+ // While the GC is marking, allocations pay an assist tax up front.
+ if gcBlackenEnabled != 0 {
+ deductAssistCredit(size)
+ }
+
+ const sizeclass = 20
+
+ const elemsize = 320
+
+ mp := acquirem()
+ if doubleCheckMalloc {
+ doubleCheckSmallNoScan(typ, mp)
+ }
+ mp.mallocing = 1
+
+ checkGCTrigger := false
+ c := getMCache(mp)
+ const spc = spanClass(sizeclass<<1) | spanClass(1)
+ span := c.alloc[spc]
+
+ // Fast path (inlined nextFreeFast): pull the next free slot out of the
+ // span's 64-bit allocCache bitmap, if it has one.
+ var nextFreeFastResult gclinkptr
+ if span.allocCache != 0 {
+ theBit := sys.TrailingZeros64(span.allocCache)
+ result := span.freeindex + uint16(theBit)
+ if result < span.nelems {
+ freeidx := result + 1
+ // Skip the fast path at 64-slot boundaries, where allocCache needs a refill.
+ if !(freeidx%64 == 0 && freeidx != span.nelems) {
+ span.allocCache >>= uint(theBit + 1)
+ span.freeindex = freeidx
+ span.allocCount++
+ nextFreeFastResult = gclinkptr(uintptr(result)*
+ 320 +
+ span.base())
+ }
+ }
+ }
+ v := nextFreeFastResult
+ // Slow path: refill the cache or fetch a fresh span; may request a GC trigger check.
+ if v == 0 {
+ v, span, checkGCTrigger = c.nextFree(spc)
+ }
+ x := unsafe.Pointer(v)
+ if needzero && span.needzero != 0 {
+ memclrNoHeapPointers(x, elemsize)
+ }
+
+ // Publish the zeroing above before the new pointer can become visible.
+ publicationBarrier()
+
+ if writeBarrier.enabled {
+
+ // GC is active: mark the new object so the current cycle treats it as live.
+ gcmarknewobject(span, uintptr(x))
+ } else {
+
+ // GC idle: just record the new allocation frontier for the scanner.
+ span.freeIndexForScan = span.freeindex
+ }
+
+ // Heap profiling: sample when the byte budget is spent or the rate changed.
+ c.nextSample -= int64(elemsize)
+ if c.nextSample < 0 || MemProfileRate != c.memProfRate {
+ profilealloc(mp, x, elemsize)
+ }
+ mp.mallocing = 0
+ releasem(mp)
+
+ if checkGCTrigger {
+ if t := (gcTrigger{kind: gcTriggerHeap}); t.test() {
+ gcStart(t)
+ }
+ }
+ // Charge the assist debt for size-class rounding (elemsize - size bytes).
+ if gcBlackenEnabled != 0 && elemsize != 0 {
+ if assistG := getg().m.curg; assistG != nil {
+ assistG.gcAssistBytes -= int64(elemsize - size)
+ }
+ }
+
+ if debug.malloc {
+ postMallocgcDebug(x, elemsize, typ)
+ }
+ return x
+}
+
+// mallocgcSmallNoScanSC21 is a generated specialization of the small,
+// pointer-free (noscan) malloc path for size class 21 (element size 352
+// bytes). size is presumably <= 352 — enforced by the dispatching caller,
+// not re-checked here. The returned slot is zeroed only when needzero is
+// set and the span still has dirty memory.
+func mallocgcSmallNoScanSC21(size uintptr, typ *_type, needzero bool) unsafe.Pointer {
+ if doubleCheckMalloc {
+ if gcphase == _GCmarktermination {
+ throw("mallocgc called with gcphase == _GCmarktermination")
+ }
+ }
+
+ lockRankMayQueueFinalizer()
+
+ if debug.malloc {
+ if x := preMallocgcDebug(size, typ); x != nil {
+ return x
+ }
+ }
+
+ // While the GC is marking, allocations pay an assist tax up front.
+ if gcBlackenEnabled != 0 {
+ deductAssistCredit(size)
+ }
+
+ const sizeclass = 21
+
+ const elemsize = 352
+
+ mp := acquirem()
+ if doubleCheckMalloc {
+ doubleCheckSmallNoScan(typ, mp)
+ }
+ mp.mallocing = 1
+
+ checkGCTrigger := false
+ c := getMCache(mp)
+ const spc = spanClass(sizeclass<<1) | spanClass(1)
+ span := c.alloc[spc]
+
+ // Fast path (inlined nextFreeFast): pull the next free slot out of the
+ // span's 64-bit allocCache bitmap, if it has one.
+ var nextFreeFastResult gclinkptr
+ if span.allocCache != 0 {
+ theBit := sys.TrailingZeros64(span.allocCache)
+ result := span.freeindex + uint16(theBit)
+ if result < span.nelems {
+ freeidx := result + 1
+ // Skip the fast path at 64-slot boundaries, where allocCache needs a refill.
+ if !(freeidx%64 == 0 && freeidx != span.nelems) {
+ span.allocCache >>= uint(theBit + 1)
+ span.freeindex = freeidx
+ span.allocCount++
+ nextFreeFastResult = gclinkptr(uintptr(result)*
+ 352 +
+ span.base())
+ }
+ }
+ }
+ v := nextFreeFastResult
+ // Slow path: refill the cache or fetch a fresh span; may request a GC trigger check.
+ if v == 0 {
+ v, span, checkGCTrigger = c.nextFree(spc)
+ }
+ x := unsafe.Pointer(v)
+ if needzero && span.needzero != 0 {
+ memclrNoHeapPointers(x, elemsize)
+ }
+
+ // Publish the zeroing above before the new pointer can become visible.
+ publicationBarrier()
+
+ if writeBarrier.enabled {
+
+ // GC is active: mark the new object so the current cycle treats it as live.
+ gcmarknewobject(span, uintptr(x))
+ } else {
+
+ // GC idle: just record the new allocation frontier for the scanner.
+ span.freeIndexForScan = span.freeindex
+ }
+
+ // Heap profiling: sample when the byte budget is spent or the rate changed.
+ c.nextSample -= int64(elemsize)
+ if c.nextSample < 0 || MemProfileRate != c.memProfRate {
+ profilealloc(mp, x, elemsize)
+ }
+ mp.mallocing = 0
+ releasem(mp)
+
+ if checkGCTrigger {
+ if t := (gcTrigger{kind: gcTriggerHeap}); t.test() {
+ gcStart(t)
+ }
+ }
+ // Charge the assist debt for size-class rounding (elemsize - size bytes).
+ if gcBlackenEnabled != 0 && elemsize != 0 {
+ if assistG := getg().m.curg; assistG != nil {
+ assistG.gcAssistBytes -= int64(elemsize - size)
+ }
+ }
+
+ if debug.malloc {
+ postMallocgcDebug(x, elemsize, typ)
+ }
+ return x
+}
+
+// mallocgcSmallNoScanSC22 is a generated specialization of the small,
+// pointer-free (noscan) malloc path for size class 22 (element size 384
+// bytes). size is presumably <= 384 — enforced by the dispatching caller,
+// not re-checked here. The returned slot is zeroed only when needzero is
+// set and the span still has dirty memory.
+func mallocgcSmallNoScanSC22(size uintptr, typ *_type, needzero bool) unsafe.Pointer {
+ if doubleCheckMalloc {
+ if gcphase == _GCmarktermination {
+ throw("mallocgc called with gcphase == _GCmarktermination")
+ }
+ }
+
+ lockRankMayQueueFinalizer()
+
+ if debug.malloc {
+ if x := preMallocgcDebug(size, typ); x != nil {
+ return x
+ }
+ }
+
+ // While the GC is marking, allocations pay an assist tax up front.
+ if gcBlackenEnabled != 0 {
+ deductAssistCredit(size)
+ }
+
+ const sizeclass = 22
+
+ const elemsize = 384
+
+ mp := acquirem()
+ if doubleCheckMalloc {
+ doubleCheckSmallNoScan(typ, mp)
+ }
+ mp.mallocing = 1
+
+ checkGCTrigger := false
+ c := getMCache(mp)
+ const spc = spanClass(sizeclass<<1) | spanClass(1)
+ span := c.alloc[spc]
+
+ // Fast path (inlined nextFreeFast): pull the next free slot out of the
+ // span's 64-bit allocCache bitmap, if it has one.
+ var nextFreeFastResult gclinkptr
+ if span.allocCache != 0 {
+ theBit := sys.TrailingZeros64(span.allocCache)
+ result := span.freeindex + uint16(theBit)
+ if result < span.nelems {
+ freeidx := result + 1
+ // Skip the fast path at 64-slot boundaries, where allocCache needs a refill.
+ if !(freeidx%64 == 0 && freeidx != span.nelems) {
+ span.allocCache >>= uint(theBit + 1)
+ span.freeindex = freeidx
+ span.allocCount++
+ nextFreeFastResult = gclinkptr(uintptr(result)*
+ 384 +
+ span.base())
+ }
+ }
+ }
+ v := nextFreeFastResult
+ // Slow path: refill the cache or fetch a fresh span; may request a GC trigger check.
+ if v == 0 {
+ v, span, checkGCTrigger = c.nextFree(spc)
+ }
+ x := unsafe.Pointer(v)
+ if needzero && span.needzero != 0 {
+ memclrNoHeapPointers(x, elemsize)
+ }
+
+ // Publish the zeroing above before the new pointer can become visible.
+ publicationBarrier()
+
+ if writeBarrier.enabled {
+
+ // GC is active: mark the new object so the current cycle treats it as live.
+ gcmarknewobject(span, uintptr(x))
+ } else {
+
+ // GC idle: just record the new allocation frontier for the scanner.
+ span.freeIndexForScan = span.freeindex
+ }
+
+ // Heap profiling: sample when the byte budget is spent or the rate changed.
+ c.nextSample -= int64(elemsize)
+ if c.nextSample < 0 || MemProfileRate != c.memProfRate {
+ profilealloc(mp, x, elemsize)
+ }
+ mp.mallocing = 0
+ releasem(mp)
+
+ if checkGCTrigger {
+ if t := (gcTrigger{kind: gcTriggerHeap}); t.test() {
+ gcStart(t)
+ }
+ }
+ // Charge the assist debt for size-class rounding (elemsize - size bytes).
+ if gcBlackenEnabled != 0 && elemsize != 0 {
+ if assistG := getg().m.curg; assistG != nil {
+ assistG.gcAssistBytes -= int64(elemsize - size)
+ }
+ }
+
+ if debug.malloc {
+ postMallocgcDebug(x, elemsize, typ)
+ }
+ return x
+}
+
+// mallocgcSmallNoScanSC23 is a generated specialization of the small,
+// pointer-free (noscan) malloc path for size class 23 (element size 416
+// bytes). size is presumably <= 416 — enforced by the dispatching caller,
+// not re-checked here. The returned slot is zeroed only when needzero is
+// set and the span still has dirty memory.
+func mallocgcSmallNoScanSC23(size uintptr, typ *_type, needzero bool) unsafe.Pointer {
+ if doubleCheckMalloc {
+ if gcphase == _GCmarktermination {
+ throw("mallocgc called with gcphase == _GCmarktermination")
+ }
+ }
+
+ lockRankMayQueueFinalizer()
+
+ if debug.malloc {
+ if x := preMallocgcDebug(size, typ); x != nil {
+ return x
+ }
+ }
+
+ // While the GC is marking, allocations pay an assist tax up front.
+ if gcBlackenEnabled != 0 {
+ deductAssistCredit(size)
+ }
+
+ const sizeclass = 23
+
+ const elemsize = 416
+
+ mp := acquirem()
+ if doubleCheckMalloc {
+ doubleCheckSmallNoScan(typ, mp)
+ }
+ mp.mallocing = 1
+
+ checkGCTrigger := false
+ c := getMCache(mp)
+ const spc = spanClass(sizeclass<<1) | spanClass(1)
+ span := c.alloc[spc]
+
+ // Fast path (inlined nextFreeFast): pull the next free slot out of the
+ // span's 64-bit allocCache bitmap, if it has one.
+ var nextFreeFastResult gclinkptr
+ if span.allocCache != 0 {
+ theBit := sys.TrailingZeros64(span.allocCache)
+ result := span.freeindex + uint16(theBit)
+ if result < span.nelems {
+ freeidx := result + 1
+ // Skip the fast path at 64-slot boundaries, where allocCache needs a refill.
+ if !(freeidx%64 == 0 && freeidx != span.nelems) {
+ span.allocCache >>= uint(theBit + 1)
+ span.freeindex = freeidx
+ span.allocCount++
+ nextFreeFastResult = gclinkptr(uintptr(result)*
+ 416 +
+ span.base())
+ }
+ }
+ }
+ v := nextFreeFastResult
+ // Slow path: refill the cache or fetch a fresh span; may request a GC trigger check.
+ if v == 0 {
+ v, span, checkGCTrigger = c.nextFree(spc)
+ }
+ x := unsafe.Pointer(v)
+ if needzero && span.needzero != 0 {
+ memclrNoHeapPointers(x, elemsize)
+ }
+
+ // Publish the zeroing above before the new pointer can become visible.
+ publicationBarrier()
+
+ if writeBarrier.enabled {
+
+ // GC is active: mark the new object so the current cycle treats it as live.
+ gcmarknewobject(span, uintptr(x))
+ } else {
+
+ // GC idle: just record the new allocation frontier for the scanner.
+ span.freeIndexForScan = span.freeindex
+ }
+
+ // Heap profiling: sample when the byte budget is spent or the rate changed.
+ c.nextSample -= int64(elemsize)
+ if c.nextSample < 0 || MemProfileRate != c.memProfRate {
+ profilealloc(mp, x, elemsize)
+ }
+ mp.mallocing = 0
+ releasem(mp)
+
+ if checkGCTrigger {
+ if t := (gcTrigger{kind: gcTriggerHeap}); t.test() {
+ gcStart(t)
+ }
+ }
+ // Charge the assist debt for size-class rounding (elemsize - size bytes).
+ if gcBlackenEnabled != 0 && elemsize != 0 {
+ if assistG := getg().m.curg; assistG != nil {
+ assistG.gcAssistBytes -= int64(elemsize - size)
+ }
+ }
+
+ if debug.malloc {
+ postMallocgcDebug(x, elemsize, typ)
+ }
+ return x
+}
+
+// mallocgcSmallNoScanSC24 is a generated specialization of the small,
+// pointer-free (noscan) malloc path for size class 24 (element size 448
+// bytes). size is presumably <= 448 — enforced by the dispatching caller,
+// not re-checked here. The returned slot is zeroed only when needzero is
+// set and the span still has dirty memory.
+func mallocgcSmallNoScanSC24(size uintptr, typ *_type, needzero bool) unsafe.Pointer {
+ if doubleCheckMalloc {
+ if gcphase == _GCmarktermination {
+ throw("mallocgc called with gcphase == _GCmarktermination")
+ }
+ }
+
+ lockRankMayQueueFinalizer()
+
+ if debug.malloc {
+ if x := preMallocgcDebug(size, typ); x != nil {
+ return x
+ }
+ }
+
+ // While the GC is marking, allocations pay an assist tax up front.
+ if gcBlackenEnabled != 0 {
+ deductAssistCredit(size)
+ }
+
+ const sizeclass = 24
+
+ const elemsize = 448
+
+ mp := acquirem()
+ if doubleCheckMalloc {
+ doubleCheckSmallNoScan(typ, mp)
+ }
+ mp.mallocing = 1
+
+ checkGCTrigger := false
+ c := getMCache(mp)
+ const spc = spanClass(sizeclass<<1) | spanClass(1)
+ span := c.alloc[spc]
+
+ // Fast path (inlined nextFreeFast): pull the next free slot out of the
+ // span's 64-bit allocCache bitmap, if it has one.
+ var nextFreeFastResult gclinkptr
+ if span.allocCache != 0 {
+ theBit := sys.TrailingZeros64(span.allocCache)
+ result := span.freeindex + uint16(theBit)
+ if result < span.nelems {
+ freeidx := result + 1
+ // Skip the fast path at 64-slot boundaries, where allocCache needs a refill.
+ if !(freeidx%64 == 0 && freeidx != span.nelems) {
+ span.allocCache >>= uint(theBit + 1)
+ span.freeindex = freeidx
+ span.allocCount++
+ nextFreeFastResult = gclinkptr(uintptr(result)*
+ 448 +
+ span.base())
+ }
+ }
+ }
+ v := nextFreeFastResult
+ // Slow path: refill the cache or fetch a fresh span; may request a GC trigger check.
+ if v == 0 {
+ v, span, checkGCTrigger = c.nextFree(spc)
+ }
+ x := unsafe.Pointer(v)
+ if needzero && span.needzero != 0 {
+ memclrNoHeapPointers(x, elemsize)
+ }
+
+ // Publish the zeroing above before the new pointer can become visible.
+ publicationBarrier()
+
+ if writeBarrier.enabled {
+
+ // GC is active: mark the new object so the current cycle treats it as live.
+ gcmarknewobject(span, uintptr(x))
+ } else {
+
+ // GC idle: just record the new allocation frontier for the scanner.
+ span.freeIndexForScan = span.freeindex
+ }
+
+ // Heap profiling: sample when the byte budget is spent or the rate changed.
+ c.nextSample -= int64(elemsize)
+ if c.nextSample < 0 || MemProfileRate != c.memProfRate {
+ profilealloc(mp, x, elemsize)
+ }
+ mp.mallocing = 0
+ releasem(mp)
+
+ if checkGCTrigger {
+ if t := (gcTrigger{kind: gcTriggerHeap}); t.test() {
+ gcStart(t)
+ }
+ }
+ // Charge the assist debt for size-class rounding (elemsize - size bytes).
+ if gcBlackenEnabled != 0 && elemsize != 0 {
+ if assistG := getg().m.curg; assistG != nil {
+ assistG.gcAssistBytes -= int64(elemsize - size)
+ }
+ }
+
+ if debug.malloc {
+ postMallocgcDebug(x, elemsize, typ)
+ }
+ return x
+}
+
+// mallocgcSmallNoScanSC25 is a generated specialization of the small,
+// pointer-free (noscan) malloc path for size class 25 (element size 480
+// bytes). size is presumably <= 480 — enforced by the dispatching caller,
+// not re-checked here. The returned slot is zeroed only when needzero is
+// set and the span still has dirty memory.
+func mallocgcSmallNoScanSC25(size uintptr, typ *_type, needzero bool) unsafe.Pointer {
+ if doubleCheckMalloc {
+ if gcphase == _GCmarktermination {
+ throw("mallocgc called with gcphase == _GCmarktermination")
+ }
+ }
+
+ lockRankMayQueueFinalizer()
+
+ if debug.malloc {
+ if x := preMallocgcDebug(size, typ); x != nil {
+ return x
+ }
+ }
+
+ // While the GC is marking, allocations pay an assist tax up front.
+ if gcBlackenEnabled != 0 {
+ deductAssistCredit(size)
+ }
+
+ const sizeclass = 25
+
+ const elemsize = 480
+
+ mp := acquirem()
+ if doubleCheckMalloc {
+ doubleCheckSmallNoScan(typ, mp)
+ }
+ mp.mallocing = 1
+
+ checkGCTrigger := false
+ c := getMCache(mp)
+ const spc = spanClass(sizeclass<<1) | spanClass(1)
+ span := c.alloc[spc]
+
+ // Fast path (inlined nextFreeFast): pull the next free slot out of the
+ // span's 64-bit allocCache bitmap, if it has one.
+ var nextFreeFastResult gclinkptr
+ if span.allocCache != 0 {
+ theBit := sys.TrailingZeros64(span.allocCache)
+ result := span.freeindex + uint16(theBit)
+ if result < span.nelems {
+ freeidx := result + 1
+ // Skip the fast path at 64-slot boundaries, where allocCache needs a refill.
+ if !(freeidx%64 == 0 && freeidx != span.nelems) {
+ span.allocCache >>= uint(theBit + 1)
+ span.freeindex = freeidx
+ span.allocCount++
+ nextFreeFastResult = gclinkptr(uintptr(result)*
+ 480 +
+ span.base())
+ }
+ }
+ }
+ v := nextFreeFastResult
+ // Slow path: refill the cache or fetch a fresh span; may request a GC trigger check.
+ if v == 0 {
+ v, span, checkGCTrigger = c.nextFree(spc)
+ }
+ x := unsafe.Pointer(v)
+ if needzero && span.needzero != 0 {
+ memclrNoHeapPointers(x, elemsize)
+ }
+
+ // Publish the zeroing above before the new pointer can become visible.
+ publicationBarrier()
+
+ if writeBarrier.enabled {
+
+ // GC is active: mark the new object so the current cycle treats it as live.
+ gcmarknewobject(span, uintptr(x))
+ } else {
+
+ // GC idle: just record the new allocation frontier for the scanner.
+ span.freeIndexForScan = span.freeindex
+ }
+
+ // Heap profiling: sample when the byte budget is spent or the rate changed.
+ c.nextSample -= int64(elemsize)
+ if c.nextSample < 0 || MemProfileRate != c.memProfRate {
+ profilealloc(mp, x, elemsize)
+ }
+ mp.mallocing = 0
+ releasem(mp)
+
+ if checkGCTrigger {
+ if t := (gcTrigger{kind: gcTriggerHeap}); t.test() {
+ gcStart(t)
+ }
+ }
+ // Charge the assist debt for size-class rounding (elemsize - size bytes).
+ if gcBlackenEnabled != 0 && elemsize != 0 {
+ if assistG := getg().m.curg; assistG != nil {
+ assistG.gcAssistBytes -= int64(elemsize - size)
+ }
+ }
+
+ if debug.malloc {
+ postMallocgcDebug(x, elemsize, typ)
+ }
+ return x
+}
+
+// mallocgcSmallNoScanSC26 is a generated specialization of the small,
+// pointer-free (noscan) malloc path for size class 26 (element size 512
+// bytes). size is presumably <= 512 — enforced by the dispatching caller,
+// not re-checked here. The returned slot is zeroed only when needzero is
+// set and the span still has dirty memory.
+func mallocgcSmallNoScanSC26(size uintptr, typ *_type, needzero bool) unsafe.Pointer {
+ if doubleCheckMalloc {
+ if gcphase == _GCmarktermination {
+ throw("mallocgc called with gcphase == _GCmarktermination")
+ }
+ }
+
+ lockRankMayQueueFinalizer()
+
+ if debug.malloc {
+ if x := preMallocgcDebug(size, typ); x != nil {
+ return x
+ }
+ }
+
+ // While the GC is marking, allocations pay an assist tax up front.
+ if gcBlackenEnabled != 0 {
+ deductAssistCredit(size)
+ }
+
+ const sizeclass = 26
+
+ const elemsize = 512
+
+ mp := acquirem()
+ if doubleCheckMalloc {
+ doubleCheckSmallNoScan(typ, mp)
+ }
+ mp.mallocing = 1
+
+ checkGCTrigger := false
+ c := getMCache(mp)
+ const spc = spanClass(sizeclass<<1) | spanClass(1)
+ span := c.alloc[spc]
+
+ // Fast path (inlined nextFreeFast): pull the next free slot out of the
+ // span's 64-bit allocCache bitmap, if it has one.
+ var nextFreeFastResult gclinkptr
+ if span.allocCache != 0 {
+ theBit := sys.TrailingZeros64(span.allocCache)
+ result := span.freeindex + uint16(theBit)
+ if result < span.nelems {
+ freeidx := result + 1
+ // Skip the fast path at 64-slot boundaries, where allocCache needs a refill.
+ if !(freeidx%64 == 0 && freeidx != span.nelems) {
+ span.allocCache >>= uint(theBit + 1)
+ span.freeindex = freeidx
+ span.allocCount++
+ nextFreeFastResult = gclinkptr(uintptr(result)*
+ 512 +
+ span.base())
+ }
+ }
+ }
+ v := nextFreeFastResult
+ // Slow path: refill the cache or fetch a fresh span; may request a GC trigger check.
+ if v == 0 {
+ v, span, checkGCTrigger = c.nextFree(spc)
+ }
+ x := unsafe.Pointer(v)
+ if needzero && span.needzero != 0 {
+ memclrNoHeapPointers(x, elemsize)
+ }
+
+ // Publish the zeroing above before the new pointer can become visible.
+ publicationBarrier()
+
+ if writeBarrier.enabled {
+
+ // GC is active: mark the new object so the current cycle treats it as live.
+ gcmarknewobject(span, uintptr(x))
+ } else {
+
+ // GC idle: just record the new allocation frontier for the scanner.
+ span.freeIndexForScan = span.freeindex
+ }
+
+ // Heap profiling: sample when the byte budget is spent or the rate changed.
+ c.nextSample -= int64(elemsize)
+ if c.nextSample < 0 || MemProfileRate != c.memProfRate {
+ profilealloc(mp, x, elemsize)
+ }
+ mp.mallocing = 0
+ releasem(mp)
+
+ if checkGCTrigger {
+ if t := (gcTrigger{kind: gcTriggerHeap}); t.test() {
+ gcStart(t)
+ }
+ }
+ // Charge the assist debt for size-class rounding (elemsize - size bytes).
+ if gcBlackenEnabled != 0 && elemsize != 0 {
+ if assistG := getg().m.curg; assistG != nil {
+ assistG.gcAssistBytes -= int64(elemsize - size)
+ }
+ }
+
+ if debug.malloc {
+ postMallocgcDebug(x, elemsize, typ)
+ }
+ return x
+}