path: root/src/runtime
author    Michael Anthony Knyszek <mknyszek@google.com>  2025-03-04 19:02:48 +0000
committer Gopher Robot <gobot@golang.org>                2025-04-23 08:00:33 -0700
commit    528bafa0498bb26a3b3961fa5bf50d02bd7101bb (patch)
tree      eb72406f4a0ce690d368b2377e2df031457775ca /src/runtime
parent    ecdd429a3be7abde6e169b79da13bffdba064cb4 (diff)
download  go-528bafa0498bb26a3b3961fa5bf50d02bd7101bb.tar.xz
runtime: move sizeclass defs to new package internal/runtime/gc
We will want to reference these definitions from new generator programs,
and this is a good opportunity to clean up all these old C-style names.

Change-Id: Ifb06f0afc381e2697e7877f038eca786610c96de
Reviewed-on: https://go-review.googlesource.com/c/go/+/655275
Auto-Submit: Michael Knyszek <mknyszek@google.com>
LUCI-TryBot-Result: Go LUCI <golang-scoped@luci-project-accounts.iam.gserviceaccount.com>
Reviewed-by: Cherry Mui <cherryyz@google.com>
Reviewed-by: Michael Pratt <mpratt@google.com>
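For orientation, here is a sketch of the surface the runtime code below now expects from the new internal/runtime/gc package, reconstructed from the call sites in this diff (the package's own files live outside src/runtime and are not shown, so treat the exact declarations as an assumption); the old C-style names are noted alongside.

package gc

// Constants, with values taken from the deleted sizeclasses.go below
// (assumed to carry over unchanged in the move).
const (
	MinHeapAlign   = 8     // was minHeapAlign
	MaxSmallSize   = 32768 // was _MaxSmallSize
	SmallSizeDiv   = 8     // was smallSizeDiv
	SmallSizeMax   = 1024  // was smallSizeMax
	LargeSizeDiv   = 128   // was largeSizeDiv
	NumSizeClasses = 68    // was _NumSizeClasses
	PageShift      = 13    // was _PageShift
)

// Lookup tables, renamed from their old C-style counterparts.
var (
	SizeClassToSize     [NumSizeClasses]uint16 // was class_to_size
	SizeClassToNPages   [NumSizeClasses]uint8  // was class_to_allocnpages
	SizeClassToDivMagic [NumSizeClasses]uint32 // was class_to_divmagic
	SizeToSizeClass8    [SmallSizeMax/SmallSizeDiv + 1]uint8                // was size_to_class8
	SizeToSizeClass128  [(MaxSmallSize-SmallSizeMax)/LargeSizeDiv + 1]uint8 // was size_to_class128
)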
Diffstat (limited to 'src/runtime')
-rw-r--r--  src/runtime/export_test.go      7
-rw-r--r--  src/runtime/heapdump.go         5
-rw-r--r--  src/runtime/malloc.go          42
-rw-r--r--  src/runtime/mcache.go           9
-rw-r--r--  src/runtime/mcentral.go        11
-rw-r--r--  src/runtime/metrics.go         11
-rw-r--r--  src/runtime/mgcsweep.go         6
-rw-r--r--  src/runtime/mheap.go           13
-rw-r--r--  src/runtime/mksizeclasses.go  357
-rw-r--r--  src/runtime/mpagealloc.go       3
-rw-r--r--  src/runtime/msize.go            8
-rw-r--r--  src/runtime/mstats.go          23
-rw-r--r--  src/runtime/sizeclasses.go     99
-rw-r--r--  src/runtime/stack.go            9
-rw-r--r--  src/runtime/traceallocfree.go   5
15 files changed, 81 insertions, 527 deletions
diff --git a/src/runtime/export_test.go b/src/runtime/export_test.go
index 8da4ece881..572f62c2f9 100644
--- a/src/runtime/export_test.go
+++ b/src/runtime/export_test.go
@@ -11,6 +11,7 @@ import (
"internal/goarch"
"internal/goos"
"internal/runtime/atomic"
+ "internal/runtime/gc"
"internal/runtime/sys"
"unsafe"
)
@@ -363,7 +364,7 @@ func ReadMemStatsSlow() (base, slow MemStats) {
slow.Mallocs = 0
slow.Frees = 0
slow.HeapReleased = 0
- var bySize [_NumSizeClasses]struct {
+ var bySize [gc.NumSizeClasses]struct {
Mallocs, Frees uint64
}
@@ -391,11 +392,11 @@ func ReadMemStatsSlow() (base, slow MemStats) {
// Collect per-sizeclass free stats.
var smallFree uint64
- for i := 0; i < _NumSizeClasses; i++ {
+ for i := 0; i < gc.NumSizeClasses; i++ {
slow.Frees += m.smallFreeCount[i]
bySize[i].Frees += m.smallFreeCount[i]
bySize[i].Mallocs += m.smallFreeCount[i]
- smallFree += m.smallFreeCount[i] * uint64(class_to_size[i])
+ smallFree += m.smallFreeCount[i] * uint64(gc.SizeClassToSize[i])
}
slow.Frees += m.tinyAllocCount + m.largeFreeCount
slow.Mallocs += slow.Frees
diff --git a/src/runtime/heapdump.go b/src/runtime/heapdump.go
index 6287cccd5f..6e6b58edd5 100644
--- a/src/runtime/heapdump.go
+++ b/src/runtime/heapdump.go
@@ -14,6 +14,7 @@ package runtime
import (
"internal/abi"
"internal/goarch"
+ "internal/runtime/gc"
"unsafe"
)
@@ -471,7 +472,7 @@ func dumproots() {
// Bit vector of free marks.
// Needs to be as big as the largest number of objects per span.
-var freemark [_PageSize / 8]bool
+var freemark [pageSize / 8]bool
func dumpobjs() {
// To protect mheap_.allspans.
@@ -483,7 +484,7 @@ func dumpobjs() {
}
p := s.base()
size := s.elemsize
- n := (s.npages << _PageShift) / size
+ n := (s.npages << gc.PageShift) / size
if n > uintptr(len(freemark)) {
throw("freemark array doesn't have enough entries")
}
diff --git a/src/runtime/malloc.go b/src/runtime/malloc.go
index 0a48f8bc61..554cfa6fcf 100644
--- a/src/runtime/malloc.go
+++ b/src/runtime/malloc.go
@@ -104,6 +104,7 @@ import (
"internal/goarch"
"internal/goos"
"internal/runtime/atomic"
+ "internal/runtime/gc"
"internal/runtime/math"
"internal/runtime/sys"
"unsafe"
@@ -112,13 +113,10 @@ import (
const (
maxTinySize = _TinySize
tinySizeClass = _TinySizeClass
- maxSmallSize = _MaxSmallSize
-
- pageShift = _PageShift
- pageSize = _PageSize
-
- _PageSize = 1 << _PageShift
- _PageMask = _PageSize - 1
+ maxSmallSize = gc.MaxSmallSize
+ pageSize = 1 << gc.PageShift
+ pageMask = pageSize - 1
+ _PageSize = pageSize // Unused. Left for viewcore.
// _64bit = 1 on 64-bit systems, 0 on 32-bit systems
_64bit = 1 << (^uintptr(0) >> 63) / 2
@@ -371,7 +369,7 @@ var (
)
func mallocinit() {
- if class_to_size[_TinySizeClass] != _TinySize {
+ if gc.SizeClassToSize[tinySizeClass] != maxTinySize {
throw("bad TinySizeClass")
}
@@ -432,11 +430,11 @@ func mallocinit() {
// span sizes are one page. Some code relies on this.
minSizeForMallocHeaderIsSizeClass := false
sizeClassesUpToMinSizeForMallocHeaderAreOnePage := true
- for i := 0; i < len(class_to_size); i++ {
- if class_to_allocnpages[i] > 1 {
+ for i := 0; i < len(gc.SizeClassToSize); i++ {
+ if gc.SizeClassToNPages[i] > 1 {
sizeClassesUpToMinSizeForMallocHeaderAreOnePage = false
}
- if minSizeForMallocHeader == uintptr(class_to_size[i]) {
+ if minSizeForMallocHeader == uintptr(gc.SizeClassToSize[i]) {
minSizeForMallocHeaderIsSizeClass = true
break
}
@@ -1272,12 +1270,12 @@ func mallocgcSmallNoscan(size uintptr, typ *_type, needzero bool) (unsafe.Pointe
checkGCTrigger := false
c := getMCache(mp)
var sizeclass uint8
- if size <= smallSizeMax-8 {
- sizeclass = size_to_class8[divRoundUp(size, smallSizeDiv)]
+ if size <= gc.SmallSizeMax-8 {
+ sizeclass = gc.SizeToSizeClass8[divRoundUp(size, gc.SmallSizeDiv)]
} else {
- sizeclass = size_to_class128[divRoundUp(size-smallSizeMax, largeSizeDiv)]
+ sizeclass = gc.SizeToSizeClass128[divRoundUp(size-gc.SmallSizeMax, gc.LargeSizeDiv)]
}
- size = uintptr(class_to_size[sizeclass])
+ size = uintptr(gc.SizeClassToSize[sizeclass])
spc := makeSpanClass(sizeclass, true)
span := c.alloc[spc]
v := nextFreeFast(span)
@@ -1360,7 +1358,7 @@ func mallocgcSmallScanNoHeader(size uintptr, typ *_type) (unsafe.Pointer, uintpt
checkGCTrigger := false
c := getMCache(mp)
- sizeclass := size_to_class8[divRoundUp(size, smallSizeDiv)]
+ sizeclass := gc.SizeToSizeClass8[divRoundUp(size, gc.SmallSizeDiv)]
spc := makeSpanClass(sizeclass, false)
span := c.alloc[spc]
v := nextFreeFast(span)
@@ -1378,7 +1376,7 @@ func mallocgcSmallScanNoHeader(size uintptr, typ *_type) (unsafe.Pointer, uintpt
} else {
c.scanAlloc += heapSetTypeNoHeader(uintptr(x), size, typ, span)
}
- size = uintptr(class_to_size[sizeclass])
+ size = uintptr(gc.SizeClassToSize[sizeclass])
// Ensure that the stores above that initialize x to
// type-safe memory and set the heap bits occur before
@@ -1453,12 +1451,12 @@ func mallocgcSmallScanHeader(size uintptr, typ *_type) (unsafe.Pointer, uintptr)
c := getMCache(mp)
size += mallocHeaderSize
var sizeclass uint8
- if size <= smallSizeMax-8 {
- sizeclass = size_to_class8[divRoundUp(size, smallSizeDiv)]
+ if size <= gc.SmallSizeMax-8 {
+ sizeclass = gc.SizeToSizeClass8[divRoundUp(size, gc.SmallSizeDiv)]
} else {
- sizeclass = size_to_class128[divRoundUp(size-smallSizeMax, largeSizeDiv)]
+ sizeclass = gc.SizeToSizeClass128[divRoundUp(size-gc.SmallSizeMax, gc.LargeSizeDiv)]
}
- size = uintptr(class_to_size[sizeclass])
+ size = uintptr(gc.SizeClassToSize[sizeclass])
spc := makeSpanClass(sizeclass, false)
span := c.alloc[spc]
v := nextFreeFast(span)
@@ -1909,7 +1907,7 @@ func persistentalloc1(size, align uintptr, sysStat *sysMemStat) *notInHeap {
if align&(align-1) != 0 {
throw("persistentalloc: align is not a power of 2")
}
- if align > _PageSize {
+ if align > pageSize {
throw("persistentalloc: align is too large")
}
} else {
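The small-allocation paths in malloc.go all share the same two-table lookup: requests up to gc.SmallSizeMax use an 8-byte-granularity table, larger small requests a 128-byte-granularity one. A minimal runnable sketch of the small path, with table prefixes copied from the generated sizeclasses.go deleted later in this diff (truncated to what the example needs, so it only handles requests up to 112 bytes):

package main

import "fmt"

const smallSizeDiv = 8

// Prefixes of the generated tables; the full arrays appear in the
// deleted sizeclasses.go further down this diff.
var sizeToSizeClass8 = [...]uint8{0, 1, 2, 3, 4, 5, 5, 6, 6, 7, 7, 8, 8, 9}
var sizeClassToSize = [...]uint16{0, 8, 16, 24, 32, 48, 64, 80, 96, 112}

func divRoundUp(n, a uintptr) uintptr { return (n + a - 1) / a }

func main() {
	// A 100-byte request: divRoundUp(100, 8) = 13,
	// sizeToSizeClass8[13] = 9, and class 9 holds 112-byte objects,
	// so mallocgc rounds the allocation up to 112 bytes.
	size := uintptr(100)
	sizeclass := sizeToSizeClass8[divRoundUp(size, smallSizeDiv)]
	fmt.Printf("size %d -> class %d -> %d bytes\n",
		size, sizeclass, sizeClassToSize[sizeclass])
}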
diff --git a/src/runtime/mcache.go b/src/runtime/mcache.go
index 44d737b19c..440120cdfe 100644
--- a/src/runtime/mcache.go
+++ b/src/runtime/mcache.go
@@ -6,6 +6,7 @@ package runtime
import (
"internal/runtime/atomic"
+ "internal/runtime/gc"
"internal/runtime/sys"
"unsafe"
)
@@ -218,18 +219,18 @@ func (c *mcache) refill(spc spanClass) {
// allocLarge allocates a span for a large object.
func (c *mcache) allocLarge(size uintptr, noscan bool) *mspan {
- if size+_PageSize < size {
+ if size+pageSize < size {
throw("out of memory")
}
- npages := size >> _PageShift
- if size&_PageMask != 0 {
+ npages := size >> gc.PageShift
+ if size&pageMask != 0 {
npages++
}
// Deduct credit for this span allocation and sweep if
// necessary. mHeap_Alloc will also sweep npages, so this only
// pays the debt down to npage pages.
- deductSweepCredit(npages*_PageSize, npages)
+ deductSweepCredit(npages*pageSize, npages)
spc := makeSpanClass(0, noscan)
s := mheap_.alloc(npages, spc)
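allocLarge's page-count computation is round-up division by the page size, with an explicit guard against the rounding overflowing uintptr. A standalone sketch of the same arithmetic:

package main

import "fmt"

const (
	pageShift = 13 // gc.PageShift
	pageSize  = 1 << pageShift
	pageMask  = pageSize - 1
)

// pagesFor mirrors allocLarge: the shift gives the whole pages, and any
// remainder in the low pageMask bits costs one more page.
func pagesFor(size uintptr) uintptr {
	if size+pageSize < size {
		panic("out of memory") // the addition wrapped around
	}
	npages := size >> pageShift
	if size&pageMask != 0 {
		npages++
	}
	return npages
}

func main() {
	fmt.Println(pagesFor(8192), pagesFor(8193)) // 1 2
}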
diff --git a/src/runtime/mcentral.go b/src/runtime/mcentral.go
index 08ff0a5c5d..21731f3fec 100644
--- a/src/runtime/mcentral.go
+++ b/src/runtime/mcentral.go
@@ -14,6 +14,7 @@ package runtime
import (
"internal/runtime/atomic"
+ "internal/runtime/gc"
"internal/runtime/sys"
)
@@ -80,7 +81,7 @@ func (c *mcentral) fullSwept(sweepgen uint32) *spanSet {
// Allocate a span to use in an mcache.
func (c *mcentral) cacheSpan() *mspan {
// Deduct credit for this span allocation and sweep if necessary.
- spanBytes := uintptr(class_to_allocnpages[c.spanclass.sizeclass()]) * _PageSize
+ spanBytes := uintptr(gc.SizeClassToNPages[c.spanclass.sizeclass()]) * pageSize
deductSweepCredit(spanBytes, 0)
traceDone := false
@@ -248,8 +249,8 @@ func (c *mcentral) uncacheSpan(s *mspan) {
// grow allocates a new empty span from the heap and initializes it for c's size class.
func (c *mcentral) grow() *mspan {
- npages := uintptr(class_to_allocnpages[c.spanclass.sizeclass()])
- size := uintptr(class_to_size[c.spanclass.sizeclass()])
+ npages := uintptr(gc.SizeClassToNPages[c.spanclass.sizeclass()])
+ size := uintptr(gc.SizeClassToSize[c.spanclass.sizeclass()])
s := mheap_.alloc(npages, c.spanclass)
if s == nil {
@@ -257,8 +258,8 @@ func (c *mcentral) grow() *mspan {
}
// Use division by multiplication and shifts to quickly compute:
- // n := (npages << _PageShift) / size
- n := s.divideByElemSize(npages << _PageShift)
+ // n := (npages << gc.PageShift) / size
+ n := s.divideByElemSize(npages << gc.PageShift)
s.limit = s.base() + size*n
s.initHeapBits()
return s
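The "division by multiplication and shifts" in grow relies on the per-class magic constants, now gc.SizeClassToDivMagic. A quick self-check in the style of the generator's own brute-force verification (see computeDivMagic in the deleted mksizeclasses.go below), here for the 48-byte class:

package main

import "fmt"

func main() {
	const d = 48          // object size, e.g. size class 5
	const spanSize = 8192 // one-page span for this class
	m := ^uint32(0)/d + 1 // the divmagic entry for d

	// n/d == (n*m) >> 32 must hold for every offset n within the span.
	for n := uint32(0); n <= spanSize; n++ {
		if uint32((uint64(n)*uint64(m))>>32) != n/d {
			panic("bad 32-bit multiply magic")
		}
	}
	fmt.Println("multiply-shift division verified for all span offsets")
}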
diff --git a/src/runtime/metrics.go b/src/runtime/metrics.go
index 417f1071bb..949a2d42bd 100644
--- a/src/runtime/metrics.go
+++ b/src/runtime/metrics.go
@@ -8,6 +8,7 @@ package runtime
import (
"internal/godebugs"
+ "internal/runtime/gc"
"unsafe"
)
@@ -62,12 +63,12 @@ func initMetrics() {
return
}
- sizeClassBuckets = make([]float64, _NumSizeClasses, _NumSizeClasses+1)
+ sizeClassBuckets = make([]float64, gc.NumSizeClasses, gc.NumSizeClasses+1)
// Skip size class 0 which is a stand-in for large objects, but large
// objects are tracked separately (and they actually get placed in
// the last bucket, not the first).
sizeClassBuckets[0] = 1 // The smallest allocation is 1 byte in size.
- for i := 1; i < _NumSizeClasses; i++ {
+ for i := 1; i < gc.NumSizeClasses; i++ {
// Size classes have an inclusive upper-bound
// and exclusive lower bound (e.g. 48-byte size class is
// (32, 48]) whereas we want an inclusive lower-bound
@@ -79,7 +80,7 @@ func initMetrics() {
// value up to 2^53 and size classes are relatively small
// (nowhere near 2^48 even) so this will give us exact
// boundaries.
- sizeClassBuckets[i] = float64(class_to_size[i] + 1)
+ sizeClassBuckets[i] = float64(gc.SizeClassToSize[i] + 1)
}
sizeClassBuckets = append(sizeClassBuckets, float64Inf())
@@ -615,8 +616,8 @@ func (a *heapStatsAggregate) compute() {
nf := a.smallFreeCount[i]
a.totalAllocs += na
a.totalFrees += nf
- a.totalAllocated += na * uint64(class_to_size[i])
- a.totalFreed += nf * uint64(class_to_size[i])
+ a.totalAllocated += na * uint64(gc.SizeClassToSize[i])
+ a.totalFreed += nf * uint64(gc.SizeClassToSize[i])
}
a.inObjects = a.totalAllocated - a.totalFreed
a.numObjects = a.totalAllocs - a.totalFrees
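The bucket-boundary comment in initMetrics above is easy to miss: a size class covers an interval like (32, 48], while histogram buckets want inclusive lower bounds, so the boundary for the 48-byte class is stored as float64(48 + 1). A small illustration:

package main

import "fmt"

func main() {
	// The 48-byte class covers (32, 48]. Storing the boundary as 49 means
	// a 48-byte allocation falls below it and a 49-byte allocation at or
	// above it. Size classes are tiny compared to 2^53, so the float64
	// boundary is exact.
	boundary := float64(48 + 1)
	fmt.Println(float64(48) < boundary) // true: 48B counts in this bucket
	fmt.Println(float64(49) < boundary) // false: 49B goes to the next one
}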
diff --git a/src/runtime/mgcsweep.go b/src/runtime/mgcsweep.go
index b6890bac47..4fd80a6883 100644
--- a/src/runtime/mgcsweep.go
+++ b/src/runtime/mgcsweep.go
@@ -517,7 +517,7 @@ func (sl *sweepLocked) sweep(preserve bool) bool {
trace := traceAcquire()
if trace.ok() {
- trace.GCSweepSpan(s.npages * _PageSize)
+ trace.GCSweepSpan(s.npages * pageSize)
traceRelease(trace)
}
@@ -981,9 +981,9 @@ func gcPaceSweeper(trigger uint64) {
// concurrent sweep are less likely to leave pages
// unswept when GC starts.
heapDistance -= 1024 * 1024
- if heapDistance < _PageSize {
+ if heapDistance < pageSize {
// Avoid setting the sweep ratio extremely high
- heapDistance = _PageSize
+ heapDistance = pageSize
}
pagesSwept := mheap_.pagesSwept.Load()
pagesInUse := mheap_.pagesInUse.Load()
diff --git a/src/runtime/mheap.go b/src/runtime/mheap.go
index 50ff68646f..7e6c284423 100644
--- a/src/runtime/mheap.go
+++ b/src/runtime/mheap.go
@@ -13,6 +13,7 @@ import (
"internal/cpu"
"internal/goarch"
"internal/runtime/atomic"
+ "internal/runtime/gc"
"internal/runtime/sys"
"unsafe"
)
@@ -514,7 +515,7 @@ func (s *mspan) base() uintptr {
}
func (s *mspan) layout() (size, n, total uintptr) {
- total = s.npages << _PageShift
+ total = s.npages << gc.PageShift
size = s.elemsize
if size > 0 {
n = total / size
@@ -576,7 +577,7 @@ func recordspan(vh unsafe.Pointer, p unsafe.Pointer) {
type spanClass uint8
const (
- numSpanClasses = _NumSizeClasses << 1
+ numSpanClasses = gc.NumSizeClasses << 1
tinySpanClass = spanClass(tinySizeClass<<1 | 1)
)
@@ -1423,14 +1424,14 @@ func (h *mheap) initSpan(s *mspan, typ spanAllocType, spanclass spanClass, base,
s.nelems = 1
s.divMul = 0
} else {
- s.elemsize = uintptr(class_to_size[sizeclass])
+ s.elemsize = uintptr(gc.SizeClassToSize[sizeclass])
if !s.spanclass.noscan() && heapBitsInSpan(s.elemsize) {
// Reserve space for the pointer/scan bitmap at the end.
s.nelems = uint16((nbytes - (nbytes / goarch.PtrSize / 8)) / s.elemsize)
} else {
s.nelems = uint16(nbytes / s.elemsize)
}
- s.divMul = class_to_divmagic[sizeclass]
+ s.divMul = gc.SizeClassToDivMagic[sizeclass]
}
// Initialize mark and allocation structures.
@@ -1589,13 +1590,13 @@ func (h *mheap) freeSpan(s *mspan) {
if msanenabled {
// Tell msan that this entire span is no longer in use.
base := unsafe.Pointer(s.base())
- bytes := s.npages << _PageShift
+ bytes := s.npages << gc.PageShift
msanfree(base, bytes)
}
if asanenabled {
// Tell asan that this entire span is no longer in use.
base := unsafe.Pointer(s.base())
- bytes := s.npages << _PageShift
+ bytes := s.npages << gc.PageShift
asanpoison(base, bytes)
}
h.freeSpanLocked(s, spanAllocHeap)
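initSpan's nelems computation is the one subtle line in the mheap.go hunks: spans that keep their pointer/scan bitmap inline reserve nbytes/ptrSize/8 bytes at the end before dividing. A worked sketch of the arithmetic, assuming a one-page span and a 48-byte class with in-span heap bits (whether a class qualifies is decided by heapBitsInSpan, which is not shown in this diff):

package main

import "fmt"

func main() {
	const nbytes, ptrSize, elemsize = 8192, 8, 48
	bitmap := nbytes / ptrSize / 8           // 128 bytes reserved at the end
	withBits := (nbytes - bitmap) / elemsize // 168 objects
	plain := nbytes / elemsize               // 170 objects without the bitmap
	fmt.Println(bitmap, withBits, plain)     // 128 168 170
}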
diff --git a/src/runtime/mksizeclasses.go b/src/runtime/mksizeclasses.go
deleted file mode 100644
index bb06ba1edd..0000000000
--- a/src/runtime/mksizeclasses.go
+++ /dev/null
@@ -1,357 +0,0 @@
-// Copyright 2016 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-//go:build ignore
-
-// Generate tables for small malloc size classes.
-//
-// See malloc.go for overview.
-//
-// The size classes are chosen so that rounding an allocation
-// request up to the next size class wastes at most 12.5% (1.125x).
-//
-// Each size class has its own page count that gets allocated
-// and chopped up when new objects of the size class are needed.
-// That page count is chosen so that chopping up the run of
-// pages into objects of the given size wastes at most 12.5% (1.125x)
-// of the memory. It is not necessary that the cutoff here be
-// the same as above.
-//
-// The two sources of waste multiply, so the worst possible case
-// for the above constraints would be that allocations of some
-// size might have a 26.6% (1.266x) overhead.
-// In practice, only one of the wastes comes into play for a
-// given size (sizes < 512 waste mainly on the round-up,
-// sizes > 512 waste mainly on the page chopping).
-// For really small sizes, alignment constraints force the
-// overhead higher.
-
-package main
-
-import (
- "bytes"
- "flag"
- "fmt"
- "go/format"
- "io"
- "log"
- "math"
- "math/bits"
- "os"
-)
-
-// Generate msize.go
-
-var stdout = flag.Bool("stdout", false, "write to stdout instead of sizeclasses.go")
-
-func main() {
- flag.Parse()
-
- var b bytes.Buffer
- fmt.Fprintln(&b, "// Code generated by mksizeclasses.go; DO NOT EDIT.")
- fmt.Fprintln(&b, "//go:generate go run mksizeclasses.go")
- fmt.Fprintln(&b)
- fmt.Fprintln(&b, "package runtime")
- classes := makeClasses()
-
- printComment(&b, classes)
-
- printClasses(&b, classes)
-
- out, err := format.Source(b.Bytes())
- if err != nil {
- log.Fatal(err)
- }
- if *stdout {
- _, err = os.Stdout.Write(out)
- } else {
- err = os.WriteFile("sizeclasses.go", out, 0666)
- }
- if err != nil {
- log.Fatal(err)
- }
-}
-
-const (
- // Constants that we use and will transfer to the runtime.
- minHeapAlign = 8
- maxSmallSize = 32 << 10
- smallSizeDiv = 8
- smallSizeMax = 1024
- largeSizeDiv = 128
- pageShift = 13
-
- // Derived constants.
- pageSize = 1 << pageShift
-)
-
-type class struct {
- size int // max size
- npages int // number of pages
-}
-
-func powerOfTwo(x int) bool {
- return x != 0 && x&(x-1) == 0
-}
-
-func makeClasses() []class {
- var classes []class
-
- classes = append(classes, class{}) // class #0 is a dummy entry
-
- align := minHeapAlign
- for size := align; size <= maxSmallSize; size += align {
- if powerOfTwo(size) { // bump alignment once in a while
- if size >= 2048 {
- align = 256
- } else if size >= 128 {
- align = size / 8
- } else if size >= 32 {
- align = 16 // heap bitmaps assume 16 byte alignment for allocations >= 32 bytes.
- }
- }
- if !powerOfTwo(align) {
- panic("incorrect alignment")
- }
-
- // Make the allocnpages big enough that
- // the leftover is less than 1/8 of the total,
- // so wasted space is at most 12.5%.
- allocsize := pageSize
- for allocsize%size > allocsize/8 {
- allocsize += pageSize
- }
- npages := allocsize / pageSize
-
- // If the previous sizeclass chose the same
- // allocation size and fit the same number of
- // objects into the page, we might as well
- // use just this size instead of having two
- // different sizes.
- if len(classes) > 1 && npages == classes[len(classes)-1].npages && allocsize/size == allocsize/classes[len(classes)-1].size {
- classes[len(classes)-1].size = size
- continue
- }
- classes = append(classes, class{size: size, npages: npages})
- }
-
- // Increase object sizes if we can fit the same number of larger objects
- // into the same number of pages. For example, we choose size 8448 above
- // with 6 objects in 7 pages. But we can well use object size 9472,
- // which is also 6 objects in 7 pages but +1024 bytes (+12.12%).
- // We need to preserve at least largeSizeDiv alignment otherwise
- // sizeToClass won't work.
- for i := range classes {
- if i == 0 {
- continue
- }
- c := &classes[i]
- psize := c.npages * pageSize
- new_size := (psize / (psize / c.size)) &^ (largeSizeDiv - 1)
- if new_size > c.size {
- c.size = new_size
- }
- }
-
- if len(classes) != 68 {
- panic("number of size classes has changed")
- }
-
- for i := range classes {
- computeDivMagic(&classes[i])
- }
-
- return classes
-}
-
-// computeDivMagic checks that the division required to compute object
-// index from span offset can be computed using 32-bit multiplication.
-// n / c.size is implemented as (n * (^uint32(0)/uint32(c.size) + 1)) >> 32
-// for all 0 <= n <= c.npages * pageSize
-func computeDivMagic(c *class) {
- // divisor
- d := c.size
- if d == 0 {
- return
- }
-
- // maximum input value for which the formula needs to work.
- max := c.npages * pageSize
-
- // As reported in [1], if n and d are unsigned N-bit integers, we
- // can compute n / d as ⌊n * c / 2^F⌋, where c is ⌈2^F / d⌉ and F is
- // computed with:
- //
- // Algorithm 2: Algorithm to select the number of fractional bits
- // and the scaled approximate reciprocal in the case of unsigned
- // integers.
- //
- // if d is a power of two then
- // Let F ← log₂(d) and c = 1.
- // else
- // Let F ← N + L where L is the smallest integer
- // such that d ≤ (2^(N+L) mod d) + 2^L.
- // end if
- //
- // [1] "Faster Remainder by Direct Computation: Applications to
- // Compilers and Software Libraries" Daniel Lemire, Owen Kaser,
- // Nathan Kurz arXiv:1902.01961
- //
- // To minimize the risk of introducing errors, we implement the
- // algorithm exactly as stated, rather than trying to adapt it to
- // fit typical Go idioms.
- N := bits.Len(uint(max))
- var F int
- if powerOfTwo(d) {
- F = int(math.Log2(float64(d)))
- if d != 1<<F {
- panic("imprecise log2")
- }
- } else {
- for L := 0; ; L++ {
- if d <= ((1<<(N+L))%d)+(1<<L) {
- F = N + L
- break
- }
- }
- }
-
- // Also, noted in the paper, F is the smallest number of fractional
- // bits required. We use 32 bits, because it works for all size
- // classes and is fast on all CPU architectures that we support.
- if F > 32 {
- fmt.Printf("d=%d max=%d N=%d F=%d\n", c.size, max, N, F)
- panic("size class requires more than 32 bits of precision")
- }
-
- // Brute force double-check with the exact computation that will be
- // done by the runtime.
- m := ^uint32(0)/uint32(c.size) + 1
- for n := 0; n <= max; n++ {
- if uint32((uint64(n)*uint64(m))>>32) != uint32(n/c.size) {
- fmt.Printf("d=%d max=%d m=%d n=%d\n", d, max, m, n)
- panic("bad 32-bit multiply magic")
- }
- }
-}
-
-func printComment(w io.Writer, classes []class) {
- fmt.Fprintf(w, "// %-5s %-9s %-10s %-7s %-10s %-9s %-9s\n", "class", "bytes/obj", "bytes/span", "objects", "tail waste", "max waste", "min align")
- prevSize := 0
- var minAligns [pageShift + 1]int
- for i, c := range classes {
- if i == 0 {
- continue
- }
- spanSize := c.npages * pageSize
- objects := spanSize / c.size
- tailWaste := spanSize - c.size*(spanSize/c.size)
- maxWaste := float64((c.size-prevSize-1)*objects+tailWaste) / float64(spanSize)
- alignBits := bits.TrailingZeros(uint(c.size))
- if alignBits > pageShift {
- // object alignment is capped at page alignment
- alignBits = pageShift
- }
- for i := range minAligns {
- if i > alignBits {
- minAligns[i] = 0
- } else if minAligns[i] == 0 {
- minAligns[i] = c.size
- }
- }
- prevSize = c.size
- fmt.Fprintf(w, "// %5d %9d %10d %7d %10d %8.2f%% %9d\n", i, c.size, spanSize, objects, tailWaste, 100*maxWaste, 1<<alignBits)
- }
- fmt.Fprintf(w, "\n")
-
- fmt.Fprintf(w, "// %-9s %-4s %-12s\n", "alignment", "bits", "min obj size")
- for bits, size := range minAligns {
- if size == 0 {
- break
- }
- if bits+1 < len(minAligns) && size == minAligns[bits+1] {
- continue
- }
- fmt.Fprintf(w, "// %9d %4d %12d\n", 1<<bits, bits, size)
- }
- fmt.Fprintf(w, "\n")
-}
-
-func maxObjsPerSpan(classes []class) int {
- most := 0
- for _, c := range classes[1:] {
- n := c.npages * pageSize / c.size
- most = max(most, n)
- }
- return most
-}
-
-func printClasses(w io.Writer, classes []class) {
- fmt.Fprintln(w, "const (")
- fmt.Fprintf(w, "minHeapAlign = %d\n", minHeapAlign)
- fmt.Fprintf(w, "_MaxSmallSize = %d\n", maxSmallSize)
- fmt.Fprintf(w, "smallSizeDiv = %d\n", smallSizeDiv)
- fmt.Fprintf(w, "smallSizeMax = %d\n", smallSizeMax)
- fmt.Fprintf(w, "largeSizeDiv = %d\n", largeSizeDiv)
- fmt.Fprintf(w, "_NumSizeClasses = %d\n", len(classes))
- fmt.Fprintf(w, "_PageShift = %d\n", pageShift)
- fmt.Fprintf(w, "maxObjsPerSpan = %d\n", maxObjsPerSpan(classes))
- fmt.Fprintln(w, ")")
-
- fmt.Fprint(w, "var class_to_size = [_NumSizeClasses]uint16 {")
- for _, c := range classes {
- fmt.Fprintf(w, "%d,", c.size)
- }
- fmt.Fprintln(w, "}")
-
- fmt.Fprint(w, "var class_to_allocnpages = [_NumSizeClasses]uint8 {")
- for _, c := range classes {
- fmt.Fprintf(w, "%d,", c.npages)
- }
- fmt.Fprintln(w, "}")
-
- fmt.Fprint(w, "var class_to_divmagic = [_NumSizeClasses]uint32 {")
- for _, c := range classes {
- if c.size == 0 {
- fmt.Fprintf(w, "0,")
- continue
- }
- fmt.Fprintf(w, "^uint32(0)/%d+1,", c.size)
- }
- fmt.Fprintln(w, "}")
-
- // map from size to size class, for small sizes.
- sc := make([]int, smallSizeMax/smallSizeDiv+1)
- for i := range sc {
- size := i * smallSizeDiv
- for j, c := range classes {
- if c.size >= size {
- sc[i] = j
- break
- }
- }
- }
- fmt.Fprint(w, "var size_to_class8 = [smallSizeMax/smallSizeDiv+1]uint8 {")
- for _, v := range sc {
- fmt.Fprintf(w, "%d,", v)
- }
- fmt.Fprintln(w, "}")
-
- // map from size to size class, for large sizes.
- sc = make([]int, (maxSmallSize-smallSizeMax)/largeSizeDiv+1)
- for i := range sc {
- size := smallSizeMax + i*largeSizeDiv
- for j, c := range classes {
- if c.size >= size {
- sc[i] = j
- break
- }
- }
- }
- fmt.Fprint(w, "var size_to_class128 = [(_MaxSmallSize-smallSizeMax)/largeSizeDiv+1]uint8 {")
- for _, v := range sc {
- fmt.Fprintf(w, "%d,", v)
- }
- fmt.Fprintln(w, "}")
-}
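The heart of the moved generator is the page-count search in makeClasses: grow the span until the tail waste drops to at most 1/8 of it. A standalone sketch of that loop, with class 35 from the generated table further down this diff as a worked case:

package main

import "fmt"

// allocNPages mirrors the generator's loop: keep adding pages while the
// leftover after chopping the span into size-byte objects exceeds 1/8
// of the span.
func allocNPages(size, pageSize int) int {
	allocsize := pageSize
	for allocsize%size > allocsize/8 {
		allocsize += pageSize
	}
	return allocsize / pageSize
}

func main() {
	// Class 35 (1408 bytes): one page leaves 8192%1408 = 1152 > 1024
	// wasted, so the generator grows the span to two pages, where
	// 16384%1408 = 896 <= 2048 -- matching the table's "11 objects,
	// 896 tail waste" row.
	fmt.Println(allocNPages(1408, 8192)) // 2
}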
diff --git a/src/runtime/mpagealloc.go b/src/runtime/mpagealloc.go
index c9491e31f4..4c58fb6e02 100644
--- a/src/runtime/mpagealloc.go
+++ b/src/runtime/mpagealloc.go
@@ -49,6 +49,7 @@ package runtime
import (
"internal/runtime/atomic"
+ "internal/runtime/gc"
"unsafe"
)
@@ -58,7 +59,7 @@ const (
pallocChunkPages = 1 << logPallocChunkPages
pallocChunkBytes = pallocChunkPages * pageSize
logPallocChunkPages = 9
- logPallocChunkBytes = logPallocChunkPages + pageShift
+ logPallocChunkBytes = logPallocChunkPages + gc.PageShift
// The number of radix bits for each level.
//
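A small aside on the constant just changed: the page-allocator chunk sizes compose as powers of two, 2^9 pages of 2^13 bytes each, so one palloc chunk covers 4 MiB. A sketch of the composition:

const (
	logPallocChunkPages = 9
	pageShift           = 13 // gc.PageShift
	logPallocChunkBytes = logPallocChunkPages + pageShift // 22
	pallocChunkBytes    = 1 << logPallocChunkBytes        // 4 MiB
)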
diff --git a/src/runtime/msize.go b/src/runtime/msize.go
index 64d1531ab0..428a86e5aa 100644
--- a/src/runtime/msize.go
+++ b/src/runtime/msize.go
@@ -9,6 +9,8 @@
package runtime
+import "internal/runtime/gc"
+
// Returns size of the memory block that mallocgc will allocate if you ask for the size,
// minus any inline space for metadata.
func roundupsize(size uintptr, noscan bool) (reqSize uintptr) {
@@ -20,10 +22,10 @@ func roundupsize(size uintptr, noscan bool) (reqSize uintptr) {
}
// (reqSize - size) is either mallocHeaderSize or 0. We need to subtract mallocHeaderSize
// from the result if we have one, since mallocgc will add it back in.
- if reqSize <= smallSizeMax-8 {
- return uintptr(class_to_size[size_to_class8[divRoundUp(reqSize, smallSizeDiv)]]) - (reqSize - size)
+ if reqSize <= gc.SmallSizeMax-8 {
+ return uintptr(gc.SizeClassToSize[gc.SizeToSizeClass8[divRoundUp(reqSize, gc.SmallSizeDiv)]]) - (reqSize - size)
}
- return uintptr(class_to_size[size_to_class128[divRoundUp(reqSize-smallSizeMax, largeSizeDiv)]]) - (reqSize - size)
+ return uintptr(gc.SizeClassToSize[gc.SizeToSizeClass128[divRoundUp(reqSize-gc.SmallSizeMax, gc.LargeSizeDiv)]]) - (reqSize - size)
}
// Large object. Align reqSize up to the next page. Check for overflow.
reqSize += pageSize - 1
diff --git a/src/runtime/mstats.go b/src/runtime/mstats.go
index 888798c600..ea61385998 100644
--- a/src/runtime/mstats.go
+++ b/src/runtime/mstats.go
@@ -8,6 +8,7 @@ package runtime
import (
"internal/runtime/atomic"
+ "internal/runtime/gc"
"unsafe"
)
@@ -397,23 +398,23 @@ func readmemstats_m(stats *MemStats) {
nFree := consStats.largeFreeCount
// Collect per-sizeclass stats.
- var bySize [_NumSizeClasses]struct {
+ var bySize [gc.NumSizeClasses]struct {
Size uint32
Mallocs uint64
Frees uint64
}
for i := range bySize {
- bySize[i].Size = uint32(class_to_size[i])
+ bySize[i].Size = uint32(gc.SizeClassToSize[i])
// Malloc stats.
a := consStats.smallAllocCount[i]
- totalAlloc += a * uint64(class_to_size[i])
+ totalAlloc += a * uint64(gc.SizeClassToSize[i])
nMalloc += a
bySize[i].Mallocs = a
// Free stats.
f := consStats.smallFreeCount[i]
- totalFree += f * uint64(class_to_size[i])
+ totalFree += f * uint64(gc.SizeClassToSize[i])
nFree += f
bySize[i].Frees = f
}
@@ -678,13 +679,13 @@ type heapStatsDelta struct {
//
// These are all uint64 because they're cumulative, and could quickly wrap
// around otherwise.
- tinyAllocCount uint64 // number of tiny allocations
- largeAlloc uint64 // bytes allocated for large objects
- largeAllocCount uint64 // number of large object allocations
- smallAllocCount [_NumSizeClasses]uint64 // number of allocs for small objects
- largeFree uint64 // bytes freed for large objects (>maxSmallSize)
- largeFreeCount uint64 // number of frees for large objects (>maxSmallSize)
- smallFreeCount [_NumSizeClasses]uint64 // number of frees for small objects (<=maxSmallSize)
+ tinyAllocCount uint64 // number of tiny allocations
+ largeAlloc uint64 // bytes allocated for large objects
+ largeAllocCount uint64 // number of large object allocations
+ smallAllocCount [gc.NumSizeClasses]uint64 // number of allocs for small objects
+ largeFree uint64 // bytes freed for large objects (>maxSmallSize)
+ largeFreeCount uint64 // number of frees for large objects (>maxSmallSize)
+ smallFreeCount [gc.NumSizeClasses]uint64 // number of frees for small objects (<=maxSmallSize)
// NOTE: This struct must be a multiple of 8 bytes in size because it
// is stored in an array. If it's not, atomic accesses to the above
diff --git a/src/runtime/sizeclasses.go b/src/runtime/sizeclasses.go
deleted file mode 100644
index bbcaa9e983..0000000000
--- a/src/runtime/sizeclasses.go
+++ /dev/null
@@ -1,99 +0,0 @@
-// Code generated by mksizeclasses.go; DO NOT EDIT.
-//go:generate go run mksizeclasses.go
-
-package runtime
-
-// class bytes/obj bytes/span objects tail waste max waste min align
-// 1 8 8192 1024 0 87.50% 8
-// 2 16 8192 512 0 43.75% 16
-// 3 24 8192 341 8 29.24% 8
-// 4 32 8192 256 0 21.88% 32
-// 5 48 8192 170 32 31.52% 16
-// 6 64 8192 128 0 23.44% 64
-// 7 80 8192 102 32 19.07% 16
-// 8 96 8192 85 32 15.95% 32
-// 9 112 8192 73 16 13.56% 16
-// 10 128 8192 64 0 11.72% 128
-// 11 144 8192 56 128 11.82% 16
-// 12 160 8192 51 32 9.73% 32
-// 13 176 8192 46 96 9.59% 16
-// 14 192 8192 42 128 9.25% 64
-// 15 208 8192 39 80 8.12% 16
-// 16 224 8192 36 128 8.15% 32
-// 17 240 8192 34 32 6.62% 16
-// 18 256 8192 32 0 5.86% 256
-// 19 288 8192 28 128 12.16% 32
-// 20 320 8192 25 192 11.80% 64
-// 21 352 8192 23 96 9.88% 32
-// 22 384 8192 21 128 9.51% 128
-// 23 416 8192 19 288 10.71% 32
-// 24 448 8192 18 128 8.37% 64
-// 25 480 8192 17 32 6.82% 32
-// 26 512 8192 16 0 6.05% 512
-// 27 576 8192 14 128 12.33% 64
-// 28 640 8192 12 512 15.48% 128
-// 29 704 8192 11 448 13.93% 64
-// 30 768 8192 10 512 13.94% 256
-// 31 896 8192 9 128 15.52% 128
-// 32 1024 8192 8 0 12.40% 1024
-// 33 1152 8192 7 128 12.41% 128
-// 34 1280 8192 6 512 15.55% 256
-// 35 1408 16384 11 896 14.00% 128
-// 36 1536 8192 5 512 14.00% 512
-// 37 1792 16384 9 256 15.57% 256
-// 38 2048 8192 4 0 12.45% 2048
-// 39 2304 16384 7 256 12.46% 256
-// 40 2688 8192 3 128 15.59% 128
-// 41 3072 24576 8 0 12.47% 1024
-// 42 3200 16384 5 384 6.22% 128
-// 43 3456 24576 7 384 8.83% 128
-// 44 4096 8192 2 0 15.60% 4096
-// 45 4864 24576 5 256 16.65% 256
-// 46 5376 16384 3 256 10.92% 256
-// 47 6144 24576 4 0 12.48% 2048
-// 48 6528 32768 5 128 6.23% 128
-// 49 6784 40960 6 256 4.36% 128
-// 50 6912 49152 7 768 3.37% 256
-// 51 8192 8192 1 0 15.61% 8192
-// 52 9472 57344 6 512 14.28% 256
-// 53 9728 49152 5 512 3.64% 512
-// 54 10240 40960 4 0 4.99% 2048
-// 55 10880 32768 3 128 6.24% 128
-// 56 12288 24576 2 0 11.45% 4096
-// 57 13568 40960 3 256 9.99% 256
-// 58 14336 57344 4 0 5.35% 2048
-// 59 16384 16384 1 0 12.49% 8192
-// 60 18432 73728 4 0 11.11% 2048
-// 61 19072 57344 3 128 3.57% 128
-// 62 20480 40960 2 0 6.87% 4096
-// 63 21760 65536 3 256 6.25% 256
-// 64 24576 24576 1 0 11.45% 8192
-// 65 27264 81920 3 128 10.00% 128
-// 66 28672 57344 2 0 4.91% 4096
-// 67 32768 32768 1 0 12.50% 8192
-
-// alignment bits min obj size
-// 8 3 8
-// 16 4 32
-// 32 5 256
-// 64 6 512
-// 128 7 768
-// 4096 12 28672
-// 8192 13 32768
-
-const (
- minHeapAlign = 8
- _MaxSmallSize = 32768
- smallSizeDiv = 8
- smallSizeMax = 1024
- largeSizeDiv = 128
- _NumSizeClasses = 68
- _PageShift = 13
- maxObjsPerSpan = 1024
-)
-
-var class_to_size = [_NumSizeClasses]uint16{0, 8, 16, 24, 32, 48, 64, 80, 96, 112, 128, 144, 160, 176, 192, 208, 224, 240, 256, 288, 320, 352, 384, 416, 448, 480, 512, 576, 640, 704, 768, 896, 1024, 1152, 1280, 1408, 1536, 1792, 2048, 2304, 2688, 3072, 3200, 3456, 4096, 4864, 5376, 6144, 6528, 6784, 6912, 8192, 9472, 9728, 10240, 10880, 12288, 13568, 14336, 16384, 18432, 19072, 20480, 21760, 24576, 27264, 28672, 32768}
-var class_to_allocnpages = [_NumSizeClasses]uint8{0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 1, 2, 1, 2, 1, 3, 2, 3, 1, 3, 2, 3, 4, 5, 6, 1, 7, 6, 5, 4, 3, 5, 7, 2, 9, 7, 5, 8, 3, 10, 7, 4}
-var class_to_divmagic = [_NumSizeClasses]uint32{0, ^uint32(0)/8 + 1, ^uint32(0)/16 + 1, ^uint32(0)/24 + 1, ^uint32(0)/32 + 1, ^uint32(0)/48 + 1, ^uint32(0)/64 + 1, ^uint32(0)/80 + 1, ^uint32(0)/96 + 1, ^uint32(0)/112 + 1, ^uint32(0)/128 + 1, ^uint32(0)/144 + 1, ^uint32(0)/160 + 1, ^uint32(0)/176 + 1, ^uint32(0)/192 + 1, ^uint32(0)/208 + 1, ^uint32(0)/224 + 1, ^uint32(0)/240 + 1, ^uint32(0)/256 + 1, ^uint32(0)/288 + 1, ^uint32(0)/320 + 1, ^uint32(0)/352 + 1, ^uint32(0)/384 + 1, ^uint32(0)/416 + 1, ^uint32(0)/448 + 1, ^uint32(0)/480 + 1, ^uint32(0)/512 + 1, ^uint32(0)/576 + 1, ^uint32(0)/640 + 1, ^uint32(0)/704 + 1, ^uint32(0)/768 + 1, ^uint32(0)/896 + 1, ^uint32(0)/1024 + 1, ^uint32(0)/1152 + 1, ^uint32(0)/1280 + 1, ^uint32(0)/1408 + 1, ^uint32(0)/1536 + 1, ^uint32(0)/1792 + 1, ^uint32(0)/2048 + 1, ^uint32(0)/2304 + 1, ^uint32(0)/2688 + 1, ^uint32(0)/3072 + 1, ^uint32(0)/3200 + 1, ^uint32(0)/3456 + 1, ^uint32(0)/4096 + 1, ^uint32(0)/4864 + 1, ^uint32(0)/5376 + 1, ^uint32(0)/6144 + 1, ^uint32(0)/6528 + 1, ^uint32(0)/6784 + 1, ^uint32(0)/6912 + 1, ^uint32(0)/8192 + 1, ^uint32(0)/9472 + 1, ^uint32(0)/9728 + 1, ^uint32(0)/10240 + 1, ^uint32(0)/10880 + 1, ^uint32(0)/12288 + 1, ^uint32(0)/13568 + 1, ^uint32(0)/14336 + 1, ^uint32(0)/16384 + 1, ^uint32(0)/18432 + 1, ^uint32(0)/19072 + 1, ^uint32(0)/20480 + 1, ^uint32(0)/21760 + 1, ^uint32(0)/24576 + 1, ^uint32(0)/27264 + 1, ^uint32(0)/28672 + 1, ^uint32(0)/32768 + 1}
-var size_to_class8 = [smallSizeMax/smallSizeDiv + 1]uint8{0, 1, 2, 3, 4, 5, 5, 6, 6, 7, 7, 8, 8, 9, 9, 10, 10, 11, 11, 12, 12, 13, 13, 14, 14, 15, 15, 16, 16, 17, 17, 18, 18, 19, 19, 19, 19, 20, 20, 20, 20, 21, 21, 21, 21, 22, 22, 22, 22, 23, 23, 23, 23, 24, 24, 24, 24, 25, 25, 25, 25, 26, 26, 26, 26, 27, 27, 27, 27, 27, 27, 27, 27, 28, 28, 28, 28, 28, 28, 28, 28, 29, 29, 29, 29, 29, 29, 29, 29, 30, 30, 30, 30, 30, 30, 30, 30, 31, 31, 31, 31, 31, 31, 31, 31, 31, 31, 31, 31, 31, 31, 31, 31, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32}
-var size_to_class128 = [(_MaxSmallSize-smallSizeMax)/largeSizeDiv + 1]uint8{32, 33, 34, 35, 36, 37, 37, 38, 38, 39, 39, 40, 40, 40, 41, 41, 41, 42, 43, 43, 44, 44, 44, 44, 44, 45, 45, 45, 45, 45, 45, 46, 46, 46, 46, 47, 47, 47, 47, 47, 47, 48, 48, 48, 49, 49, 50, 51, 51, 51, 51, 51, 51, 51, 51, 51, 51, 52, 52, 52, 52, 52, 52, 52, 52, 52, 52, 53, 53, 54, 54, 54, 54, 55, 55, 55, 55, 55, 56, 56, 56, 56, 56, 56, 56, 56, 56, 56, 56, 57, 57, 57, 57, 57, 57, 57, 57, 57, 57, 58, 58, 58, 58, 58, 58, 59, 59, 59, 59, 59, 59, 59, 59, 59, 59, 59, 59, 59, 59, 59, 59, 60, 60, 60, 60, 60, 60, 60, 60, 60, 60, 60, 60, 60, 60, 60, 60, 61, 61, 61, 61, 61, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 63, 63, 63, 63, 63, 63, 63, 63, 63, 63, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 65, 65, 65, 65, 65, 65, 65, 65, 65, 65, 65, 65, 65, 65, 65, 65, 65, 65, 65, 65, 65, 66, 66, 66, 66, 66, 66, 66, 66, 66, 66, 66, 67, 67, 67, 67, 67, 67, 67, 67, 67, 67, 67, 67, 67, 67, 67, 67, 67, 67, 67, 67, 67, 67, 67, 67, 67, 67, 67, 67, 67, 67, 67, 67}
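The "max waste" column in the table above follows the generator's formula: in the worst case every object slot holds a request one byte larger than the previous class's size, plus the span's tail waste. Checking class 3 by hand:

package main

import "fmt"

func main() {
	// Class 3: 24-byte objects, 8192-byte span, previous class is 16 bytes.
	const size, prevSize, spanSize = 24, 16, 8192
	objects := spanSize / size           // 341
	tailWaste := spanSize - size*objects // 8
	maxWaste := float64((size-prevSize-1)*objects+tailWaste) / spanSize
	fmt.Printf("%.2f%%\n", 100*maxWaste) // 29.24%, as in the table
}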
diff --git a/src/runtime/stack.go b/src/runtime/stack.go
index a2866322a9..2fedaa9421 100644
--- a/src/runtime/stack.go
+++ b/src/runtime/stack.go
@@ -10,6 +10,7 @@ import (
"internal/goarch"
"internal/goos"
"internal/runtime/atomic"
+ "internal/runtime/gc"
"internal/runtime/sys"
"unsafe"
)
@@ -161,11 +162,11 @@ type stackpoolItem struct {
// Global pool of large stack spans.
var stackLarge struct {
lock mutex
- free [heapAddrBits - pageShift]mSpanList // free lists by log_2(s.npages)
+ free [heapAddrBits - gc.PageShift]mSpanList // free lists by log_2(s.npages)
}
func stackinit() {
- if _StackCacheSize&_PageMask != 0 {
+ if _StackCacheSize&pageMask != 0 {
throw("cache size must be a multiple of page size")
}
for i := range stackpool {
@@ -196,7 +197,7 @@ func stackpoolalloc(order uint8) gclinkptr {
lockWithRankMayAcquire(&mheap_.lock, lockRankMheap)
if s == nil {
// no free stacks. Allocate another span worth.
- s = mheap_.allocManual(_StackCacheSize>>_PageShift, spanAllocStack)
+ s = mheap_.allocManual(_StackCacheSize>>gc.PageShift, spanAllocStack)
if s == nil {
throw("out of memory")
}
@@ -390,7 +391,7 @@ func stackalloc(n uint32) stack {
v = unsafe.Pointer(x)
} else {
var s *mspan
- npage := uintptr(n) >> _PageShift
+ npage := uintptr(n) >> gc.PageShift
log2npage := stacklog2(npage)
// Try to get a stack from the large stack cache.
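stackLarge's free lists are indexed by log2 of a span's page count. A sketch of the indexing, assuming large stack sizes are powers of two (stackalloc enforces power-of-two sizes) and using math/bits in place of the runtime's own stacklog2:

package main

import (
	"fmt"
	"math/bits"
)

func largeStackIndex(nbytes uintptr) int {
	npage := nbytes >> 13            // gc.PageShift
	return bits.Len(uint(npage)) - 1 // log2(npage) for powers of two
}

func main() {
	fmt.Println(largeStackIndex(32 << 10)) // 32 KiB stack: 4 pages -> list 2
}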
diff --git a/src/runtime/traceallocfree.go b/src/runtime/traceallocfree.go
index 40f1cfe8ab..70e48ea3a6 100644
--- a/src/runtime/traceallocfree.go
+++ b/src/runtime/traceallocfree.go
@@ -8,6 +8,7 @@ package runtime
import (
"internal/abi"
+ "internal/runtime/gc"
"internal/runtime/sys"
"internal/trace/tracev2"
)
@@ -38,7 +39,7 @@ func traceSnapshotMemory(gen uintptr) {
// Emit info.
w.varint(uint64(trace.minPageHeapAddr))
w.varint(uint64(pageSize))
- w.varint(uint64(minHeapAlign))
+ w.varint(uint64(gc.MinHeapAlign))
w.varint(uint64(fixedStack))
// Finish writing the batch.
@@ -129,7 +130,7 @@ func (tl traceLocker) HeapObjectFree(addr uintptr) {
// traceHeapObjectID creates a trace ID for a heap object at address addr.
func traceHeapObjectID(addr uintptr) traceArg {
- return traceArg(uint64(addr)-trace.minPageHeapAddr) / minHeapAlign
+ return traceArg(uint64(addr)-trace.minPageHeapAddr) / gc.MinHeapAlign
}
// GoroutineStackExists records that a goroutine stack already exists at address base with the provided size.
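One note on the traceHeapObjectID hunk: dividing the offset by gc.MinHeapAlign works as an ID because no two heap objects can sit closer than 8 bytes apart (the smallest size class is 8 bytes), so the quotients stay distinct while the ID space stays dense. A standalone sketch, where heapObjectID and its parameters are illustrative names rather than runtime API:

func heapObjectID(addr, minPageHeapAddr uint64) uint64 {
	// Objects are at least MinHeapAlign (8) bytes apart, so distinct
	// addresses yield distinct, compact IDs.
	return (addr - minPageHeapAddr) / 8
}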