From 14c7caae5074fdf0d97a3ad995e20c63e4065cbf Mon Sep 17 00:00:00 2001
From: Martin Möhrmann
Date: Mon, 13 Jul 2020 18:12:20 +0200
Subject: runtime: add 24 byte allocation size class
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

This CL introduces a 24 byte allocation size class, which fits 3 pointers
on 64 bit and 6 pointers on 32 bit architectures. Notably, the new size
class fits a slice header on 64 bit architectures exactly, whereas
previously a 32 byte size class would have been used to allocate a slice
header on the heap.

The main complexity added by this CL is that heapBitsSetType needs to
handle objects that aren't 16-byte aligned but contain more than a single
pointer on 64-bit architectures.

Because 32 bit now has a size class that is not 16 byte aligned, an
h.shift of 2 is possible there, which means a heap bitmap byte might be
only partially written. Since this was already possible on 64 bit, the
heap bitmap code only needed minor adjustments for the 32 bit doublecheck
code paths.

Note that this CL changes the capacity allocated by append when growing a
slice to a target capacity of 17 to 24 bytes. On 64 bit architectures the
capacity of the slice returned by append([]byte{}, make([]byte, 24)...)
is 32 bytes before and 24 bytes after this CL. Depending on the allocation
patterns of the specific Go program this can increase the total number of
allocations, as subsequent appends to the slice can trigger slice growth
earlier than before. On the other hand, if the slice is never appended to
beyond its capacity again, heap usage is lowered by 8 bytes.

Because the runtime.MemStats.BySize array is limited to a total of 61
size classes, this CL also changes the set of size classes it reports:
the new 24 byte size class is now included, and the 20480 byte size class
is no longer included.

Fixes #8885
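For illustration, the capacity change described above can be observed with
a small program along the following lines; the exact capacity printed
depends on the Go version and architecture, since it reflects the
allocation size class chosen for the backing array:

    package main

    import "fmt"

    func main() {
        // append must allocate a backing array for the 24 copied bytes.
        // Its capacity is rounded up to an allocation size class:
        // 32 bytes without a 24 byte size class, 24 bytes with it
        // (on 64 bit architectures).
        s := append([]byte{}, make([]byte, 24)...)
        fmt.Println(len(s), cap(s))
    }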
name                      old time/op       new time/op       delta
Template                  196ms ± 3%        194ms ± 2%        ~       (p=0.247 n=10+10)
Unicode                   85.6ms ±16%       88.1ms ± 1%       ~       (p=0.165 n=10+10)
GoTypes                   673ms ± 2%        668ms ± 2%        ~       (p=0.258 n=9+9)
Compiler                  3.14s ± 6%        3.08s ± 1%        ~       (p=0.243 n=10+9)
SSA                       6.82s ± 1%        6.76s ± 1%        -0.87%  (p=0.006 n=9+10)
Flate                     128ms ± 7%        127ms ± 3%        ~       (p=0.739 n=10+10)
GoParser                  154ms ± 3%        153ms ± 4%        ~       (p=0.730 n=9+9)
Reflect                   404ms ± 1%        412ms ± 4%        +1.99%  (p=0.022 n=9+10)
Tar                       172ms ± 4%        170ms ± 4%        ~       (p=0.065 n=10+9)
XML                       231ms ± 4%        230ms ± 3%        ~       (p=0.912 n=10+10)
LinkCompiler              341ms ± 1%        339ms ± 1%        ~       (p=0.243 n=9+10)
ExternalLinkCompiler      1.72s ± 1%        1.72s ± 1%        ~       (p=0.661 n=9+10)
LinkWithoutDebugCompiler  221ms ± 2%        221ms ± 2%        ~       (p=0.529 n=10+10)
StdCmd                    18.4s ± 3%        18.2s ± 1%        ~       (p=0.515 n=10+8)

name                      old user-time/op  new user-time/op  delta
Template                  238ms ± 4%        243ms ± 6%        ~       (p=0.661 n=9+10)
Unicode                   116ms ± 6%        113ms ± 3%        -3.37%  (p=0.035 n=9+10)
GoTypes                   854ms ± 2%        848ms ± 2%        ~       (p=0.604 n=9+10)
Compiler                  4.10s ± 1%        4.11s ± 1%        ~       (p=0.481 n=8+9)
SSA                       9.49s ± 1%        9.41s ± 1%        -0.92%  (p=0.001 n=9+10)
Flate                     149ms ± 6%        151ms ± 7%        ~       (p=0.481 n=10+10)
GoParser                  189ms ± 2%        190ms ± 2%        ~       (p=0.497 n=9+10)
Reflect                   511ms ± 2%        508ms ± 2%        ~       (p=0.211 n=9+10)
Tar                       215ms ± 4%        212ms ± 3%        ~       (p=0.105 n=10+10)
XML                       288ms ± 2%        288ms ± 2%        ~       (p=0.971 n=10+10)
LinkCompiler              559ms ± 4%        557ms ± 1%        ~       (p=0.968 n=9+10)
ExternalLinkCompiler      1.78s ± 1%        1.77s ± 1%        ~       (p=0.055 n=8+10)
LinkWithoutDebugCompiler  245ms ± 3%        245ms ± 2%        ~       (p=0.684 n=10+10)

name                      old alloc/op      new alloc/op      delta
Template                  34.8MB ± 0%       34.4MB ± 0%       -0.95%  (p=0.000 n=9+10)
Unicode                   28.6MB ± 0%       28.3MB ± 0%       -0.95%  (p=0.000 n=10+10)
GoTypes                   115MB ± 0%        114MB ± 0%        -1.02%  (p=0.000 n=10+9)
Compiler                  554MB ± 0%        549MB ± 0%        -0.86%  (p=0.000 n=9+10)
SSA                       1.28GB ± 0%       1.27GB ± 0%       -0.83%  (p=0.000 n=10+10)
Flate                     21.8MB ± 0%       21.6MB ± 0%       -0.87%  (p=0.000 n=8+10)
GoParser                  26.7MB ± 0%       26.4MB ± 0%       -0.97%  (p=0.000 n=10+9)
Reflect                   75.0MB ± 0%       74.1MB ± 0%       -1.18%  (p=0.000 n=10+10)
Tar                       32.6MB ± 0%       32.3MB ± 0%       -0.94%  (p=0.000 n=10+7)
XML                       41.5MB ± 0%       41.2MB ± 0%       -0.90%  (p=0.000 n=10+8)
LinkCompiler              105MB ± 0%        104MB ± 0%        -0.94%  (p=0.000 n=10+10)
ExternalLinkCompiler      153MB ± 0%        152MB ± 0%        -0.69%  (p=0.000 n=10+10)
LinkWithoutDebugCompiler  63.7MB ± 0%       63.6MB ± 0%       -0.13%  (p=0.000 n=10+10)

name                      old allocs/op     new allocs/op     delta
Template                  336k ± 0%         336k ± 0%         +0.02%  (p=0.002 n=10+10)
Unicode                   332k ± 0%         332k ± 0%         ~       (p=0.447 n=10+10)
GoTypes                   1.16M ± 0%        1.16M ± 0%        +0.01%  (p=0.001 n=10+10)
Compiler                  4.92M ± 0%        4.92M ± 0%        +0.01%  (p=0.000 n=10+10)
SSA                       11.9M ± 0%        11.9M ± 0%        +0.02%  (p=0.000 n=9+10)
Flate                     214k ± 0%         214k ± 0%         +0.02%  (p=0.032 n=10+8)
GoParser                  270k ± 0%         270k ± 0%         +0.02%  (p=0.004 n=10+9)
Reflect                   877k ± 0%         877k ± 0%         +0.01%  (p=0.000 n=10+10)
Tar                       313k ± 0%         313k ± 0%         ~       (p=0.075 n=9+10)
XML                       387k ± 0%         387k ± 0%         +0.02%  (p=0.007 n=10+10)
LinkCompiler              455k ± 0%         456k ± 0%         +0.08%  (p=0.000 n=10+9)
ExternalLinkCompiler      670k ± 0%         671k ± 0%         +0.06%  (p=0.000 n=10+10)
LinkWithoutDebugCompiler  113k ± 0%         113k ± 0%         ~       (p=0.149 n=10+10)

name                      old maxRSS/op     new maxRSS/op     delta
Template                  34.1M ± 1%        34.1M ± 1%        ~       (p=0.853 n=10+10)
Unicode                   35.1M ± 1%        34.6M ± 1%        -1.43%  (p=0.000 n=10+10)
GoTypes                   72.8M ± 3%        73.3M ± 2%        ~       (p=0.724 n=10+10)
Compiler                  288M ± 3%         295M ± 4%         ~       (p=0.393 n=10+10)
SSA                       630M ± 1%         622M ± 1%         -1.18%  (p=0.001 n=10+10)
Flate                     26.0M ± 1%        26.2M ± 2%        ~       (p=0.493 n=10+10)
GoParser                  28.6M ± 1%        28.5M ± 2%        ~       (p=0.256 n=10+10)
Reflect                   55.5M ± 2%        55.4M ± 1%        ~       (p=0.436 n=10+10)
Tar                       33.0M ± 1%        32.8M ± 2%        ~       (p=0.075 n=10+10)
XML                       38.7M ± 1%        39.0M ± 1%        ~       (p=0.053 n=9+10)
LinkCompiler              164M ± 1%         164M ± 1%         -0.27%  (p=0.029 n=10+10)
ExternalLinkCompiler      174M ± 0%         173M ± 0%         -0.33%  (p=0.002 n=9+10)
LinkWithoutDebugCompiler  137M ± 0%         136M ± 2%         ~       (p=0.825 n=9+10)

Change-Id: I9ecf2a10024513abef8fbfbe519e44e0b29b6167
Reviewed-on: https://go-review.googlesource.com/c/go/+/242258
Trust: Martin Möhrmann
Trust: Michael Knyszek
Run-TryBot: Martin Möhrmann
TryBot-Result: Go Bot
Reviewed-by: Michael Knyszek
Reviewed-by: Keith Randall
---
 src/runtime/mbitmap.go | 112 ++++++++++++++++++++++++++++++++++++++++++-------
 1 file changed, 96 insertions(+), 16 deletions(-)

(limited to 'src/runtime/mbitmap.go')

diff --git a/src/runtime/mbitmap.go b/src/runtime/mbitmap.go
index 8de44c14b9..51c3625c3d 100644
--- a/src/runtime/mbitmap.go
+++ b/src/runtime/mbitmap.go
@@ -30,10 +30,9 @@
 // indicates scanning can ignore the rest of the allocation.
 //
 // The 2-bit entries are split when written into the byte, so that the top half
-// of the byte contains 4 high bits and the bottom half contains 4 low (pointer)
-// bits.
-// This form allows a copy from the 1-bit to the 4-bit form to keep the
-// pointer bits contiguous, instead of having to space them out.
+// of the byte contains 4 high (scan) bits and the bottom half contains 4 low
+// (pointer) bits. This form allows a copy from the 1-bit to the 4-bit form to
+// keep the pointer bits contiguous, instead of having to space them out.
 //
 // The code makes use of the fact that the zero value for a heap
 // bitmap means scalar/dead. This property must be preserved when
@@ -816,6 +815,12 @@ func (s *mspan) countAlloc() int {
 func heapBitsSetType(x, size, dataSize uintptr, typ *_type) {
 	const doubleCheck = false // slow but helpful; enable to test modifications to this code
 
+	const (
+		mask1 = bitPointer | bitScan // 00010001
+		mask2 = bitPointer | bitScan | mask1<> 1
+
+		// For h.shift > 1 heap bits cross a byte boundary and need to be written part
+		// to h.bitp and part to the next h.bitp.
+		switch h.shift {
+		case 0:
+			*h.bitp &^= mask3 << 0
+			*h.bitp |= hb << 0
+		case 1:
+			*h.bitp &^= mask3 << 1
+			*h.bitp |= hb << 1
+		case 2:
+			*h.bitp &^= mask2 << 2
+			*h.bitp |= (hb & mask2) << 2
+			// Two words written to the first byte.
+			// Advance two words to get to the next byte.
+			h = h.next().next()
+			*h.bitp &^= mask1
+			*h.bitp |= (hb >> 2) & mask1
+		case 3:
+			*h.bitp &^= mask1 << 3
+			*h.bitp |= (hb & mask1) << 3
+			// One word written to the first byte.
+			// Advance one word to get to the next byte.
+			h = h.next()
+			*h.bitp &^= mask2
+			*h.bitp |= (hb >> 1) & mask2
+		}
+		return
 	}
 
 	// Copy from 1-bit ptrmask into 2-bit bitmap.
@@ -1079,7 +1149,7 @@ func heapBitsSetType(x, size, dataSize uintptr, typ *_type) {
 	// word must be set to scan since there are pointers
 	// somewhere in the object.
 	// In all following words, we set the scan/dead
-	// appropriately to indicate that the object contains
+	// appropriately to indicate that the object continues
 	// to the next 2-bit entry in the bitmap.
 	//
 	// We set four bits at a time here, but if the object
@@ -1095,12 +1165,22 @@ func heapBitsSetType(x, size, dataSize uintptr, typ *_type) {
 		b >>= 4
 		nb -= 4
 
-	case sys.PtrSize == 8 && h.shift == 2:
+	case h.shift == 2:
 		// Ptrmask and heap bitmap are misaligned.
+		//
+		// On 32 bit architectures only the 6-word object that corresponds
+		// to a 24 bytes size class can start with h.shift of 2 here since
+		// all other non 16 byte aligned size classes have been handled by
+		// special code paths at the beginning of heapBitsSetType on 32 bit.
+		//
+		// Many size classes are only 16 byte aligned. On 64 bit architectures
+		// this results in a heap bitmap position starting with a h.shift of 2.
+		//
 		// The bits for the first two words are in a byte shared
 		// with another object, so we must be careful with the bits
 		// already there.
-		// We took care of 1-word and 2-word objects above,
+		//
+		// We took care of 1-word, 2-word, and 3-word objects above,
+		// so this is at least a 6-word object.
 		hb = (b & (bitPointer | bitPointer<= nw {
-			// We know that there is more data, because we handled 2-word objects above.
+			// We know that there is more data, because we handled 2-word and 3-word objects above.
 			// This must be at least a 6-word object. If we're out of pointer words,
 			// mark no scan in next bitmap byte and finish.
 			hb = 0
@@ -1248,12 +1328,12 @@ Phase4:
 	// Handle the first byte specially if it's shared. See
 	// Phase 1 for why this is the only special case we need.
 	if doubleCheck {
-		if !(h.shift == 0 || (sys.PtrSize == 8 && h.shift == 2)) {
+		if !(h.shift == 0 || h.shift == 2) {
 			print("x=", x, " size=", size, " cnw=", h.shift, "\n")
 			throw("bad start shift")
 		}
 	}
-	if sys.PtrSize == 8 && h.shift == 2 {
+	if h.shift == 2 {
 		*h.bitp = *h.bitp&^((bitPointer|bitScan|(bitPointer|bitScan)<

Date: Wed, 29 Jul 2020 19:00:37 +0000
Subject: runtime: make the span allocation purpose more explicit

This change modifies mheap's span allocation API to have each caller
declare a purpose, defined as a new enum called spanAllocType.

The purpose behind this change is two-fold:

1. Tight control over who gets to allocate heap memory is, generally
   speaking, a good thing. Every codepath that allocates heap memory
   places additional implicit restrictions on the allocator. A notable
   example of a restriction is work bufs coming from heap memory: write
   barriers are not allowed in allocation paths because then we could
   have a situation where the allocator calls into the allocator.

2. Memory statistic updating is explicit. Instead of passing an opaque
   pointer for statistic updating, which places restrictions on how that
   statistic may be updated, we use the spanAllocType to determine which
   statistic to update and how.

We also take this opportunity to group all the statistic updating code
together, which should make the accounting code a little easier to
follow.

Change-Id: Ic0b0898959ba2a776f67122f0e36c9d7d60e3085
Reviewed-on: https://go-review.googlesource.com/c/go/+/246970
Trust: Michael Knyszek
Run-TryBot: Michael Knyszek
TryBot-Result: Go Bot
Reviewed-by: Michael Pratt
---
 src/runtime/mbitmap.go |  4 +--
 src/runtime/mgcwork.go |  4 +--
 src/runtime/mheap.go   | 78 +++++++++++++++++++++++++++++++++++++-------------
 src/runtime/stack.go   | 12 ++++----
 4 files changed, 68 insertions(+), 30 deletions(-)

(limited to 'src/runtime/mbitmap.go')

diff --git a/src/runtime/mbitmap.go b/src/runtime/mbitmap.go
index 51c3625c3d..fbfaae0f93 100644
--- a/src/runtime/mbitmap.go
+++ b/src/runtime/mbitmap.go
@@ -1868,12 +1868,12 @@ func materializeGCProg(ptrdata uintptr, prog *byte) *mspan {
 	bitmapBytes := divRoundUp(ptrdata, 8*sys.PtrSize)
 	// Compute the number of pages needed for bitmapBytes.
 	pages := divRoundUp(bitmapBytes, pageSize)
-	s := mheap_.allocManual(pages, &memstats.gc_sys)
+	s := mheap_.allocManual(pages, spanAllocPtrScalarBits)
 	runGCProg(addb(prog, 4), nil, (*byte)(unsafe.Pointer(s.startAddr)), 1)
 	return s
 }
 
 func dematerializeGCProg(s *mspan) {
-	mheap_.freeManual(s, &memstats.gc_sys)
+	mheap_.freeManual(s, spanAllocPtrScalarBits)
 }
 
 func dumpGCProg(p *byte) {
diff --git a/src/runtime/mgcwork.go b/src/runtime/mgcwork.go
index 51e0fe9219..b3a068661e 100644
--- a/src/runtime/mgcwork.go
+++ b/src/runtime/mgcwork.go
@@ -371,7 +371,7 @@ func getempty() *workbuf {
 	}
 	if s == nil {
 		systemstack(func() {
-			s = mheap_.allocManual(workbufAlloc/pageSize, &memstats.gc_sys)
+			s = mheap_.allocManual(workbufAlloc/pageSize, spanAllocWorkBuf)
 		})
 		if s == nil {
 			throw("out of memory")
@@ -473,7 +473,7 @@ func freeSomeWbufs(preemptible bool) bool {
 				break
 			}
 			work.wbufSpans.free.remove(span)
-			mheap_.freeManual(span, &memstats.gc_sys)
+			mheap_.freeManual(span, spanAllocWorkBuf)
 		}
 	})
 	more := !work.wbufSpans.free.isEmpty()
diff --git a/src/runtime/mheap.go b/src/runtime/mheap.go
index 40fd58b0ef..df659e222b 100644
--- a/src/runtime/mheap.go
+++ b/src/runtime/mheap.go
@@ -861,6 +861,22 @@ func (h *mheap) reclaimChunk(arenas []arenaIdx, pageIdx, n uintptr) uintptr {
 	return nFreed
 }
 
+// spanAllocType represents the type of allocation to make, or
+// the type of allocation to be freed.
+type spanAllocType uint8
+
+const (
+	spanAllocHeap          spanAllocType = iota // heap span
+	spanAllocStack                              // stack span
+	spanAllocPtrScalarBits                      // unrolled GC prog bitmap span
+	spanAllocWorkBuf                            // work buf span
+)
+
+// manual returns true if the span allocation is manually managed.
+func (s spanAllocType) manual() bool {
+	return s != spanAllocHeap
+}
+
 // alloc allocates a new span of npage pages from the GC'd heap.
 //
 // spanclass indicates the span's size class and scannability.
@@ -877,7 +893,7 @@ func (h *mheap) alloc(npages uintptr, spanclass spanClass, needzero bool) *mspan
 		if h.sweepdone == 0 {
 			h.reclaim(npages)
 		}
-		s = h.allocSpan(npages, false, spanclass, &memstats.heap_inuse)
+		s = h.allocSpan(npages, spanAllocHeap, spanclass)
 	})
 
 	if s != nil {
@@ -902,9 +918,15 @@ func (h *mheap) alloc(npages uintptr, spanclass spanClass, needzero bool) *mspan
 // allocManual must be called on the system stack because it may
 // acquire the heap lock via allocSpan. See mheap for details.
 //
+// If new code is written to call allocManual, do NOT use an
+// existing spanAllocType value and instead declare a new one.
+//
 //go:systemstack
-func (h *mheap) allocManual(npages uintptr, stat *uint64) *mspan {
-	return h.allocSpan(npages, true, 0, stat)
+func (h *mheap) allocManual(npages uintptr, typ spanAllocType) *mspan {
+	if !typ.manual() {
+		throw("manual span allocation called with non-manually-managed type")
+	}
+	return h.allocSpan(npages, typ, 0)
 }
 
 // setSpans modifies the span map so [spanOf(base), spanOf(base+npage*pageSize))
@@ -1066,7 +1088,7 @@ func (h *mheap) freeMSpanLocked(s *mspan) {
 
 // allocSpan allocates an mspan which owns npages worth of memory.
 //
-// If manual == false, allocSpan allocates a heap span of class spanclass
+// If typ.manual() == false, allocSpan allocates a heap span of class spanclass
 // and updates heap accounting. If manual == true, allocSpan allocates a
 // manually-managed span (spanclass is ignored), and the caller is
 // responsible for any accounting related to its use of the span. Either
@@ -1081,7 +1103,7 @@ func (h *mheap) freeMSpanLocked(s *mspan) {
 // the heap lock and because it must block GC transitions.
 //
 //go:systemstack
-func (h *mheap) allocSpan(npages uintptr, manual bool, spanclass spanClass, sysStat *uint64) (s *mspan) {
+func (h *mheap) allocSpan(npages uintptr, typ spanAllocType, spanclass spanClass) (s *mspan) {
 	// Function-global state.
 	gp := getg()
 	base, scav := uintptr(0), uintptr(0)
@@ -1143,12 +1165,10 @@ HaveSpan:
 		s.needzero = 1
 	}
 	nbytes := npages * pageSize
-	if manual {
+	if typ.manual() {
 		s.manualFreeList = 0
 		s.nelems = 0
 		s.limit = s.base() + s.npages*pageSize
-		// Manually managed memory doesn't count toward heap_sys.
-		mSysStatDec(&memstats.heap_sys, s.npages*pageSize)
 		s.state.set(mSpanManual)
 	} else {
 		// We must set span properties before the span is published anywhere
@@ -1205,7 +1225,18 @@ HaveSpan:
 		mSysStatDec(&memstats.heap_released, scav)
 	}
 	// Update stats.
-	mSysStatInc(sysStat, nbytes)
+	switch typ {
+	case spanAllocHeap:
+		mSysStatInc(&memstats.heap_inuse, nbytes)
+	case spanAllocStack:
+		mSysStatInc(&memstats.stacks_inuse, nbytes)
+	case spanAllocPtrScalarBits, spanAllocWorkBuf:
+		mSysStatInc(&memstats.gc_sys, nbytes)
+	}
+	if typ.manual() {
+		// Manually managed memory doesn't count toward heap_sys.
+		mSysStatDec(&memstats.heap_sys, nbytes)
+	}
 	mSysStatDec(&memstats.heap_idle, nbytes)
 
 	// Publish the span in various locations.
@@ -1217,7 +1248,7 @@ HaveSpan:
 	// before that happens) or pageInUse is updated.
 	h.setSpans(s.base(), npages, s)
 
-	if !manual {
+	if !typ.manual() {
 		// Mark in-use span in arena page bitmap.
 		//
 		// This publishes the span to the page sweeper, so
@@ -1323,13 +1354,13 @@ func (h *mheap) freeSpan(s *mspan) {
 			bytes := s.npages << _PageShift
 			msanfree(base, bytes)
 		}
-		h.freeSpanLocked(s, true, true)
+		h.freeSpanLocked(s, spanAllocHeap)
 		unlock(&h.lock)
 	})
 }
 
 // freeManual frees a manually-managed span returned by allocManual.
-// stat must be the same as the stat passed to the allocManual that
+// typ must be the same as the spanAllocType passed to the allocManual that
 // allocated s.
 //
 // This must only be called when gcphase == _GCoff. See mSpanState for
@@ -1339,16 +1370,14 @@ func (h *mheap) freeSpan(s *mspan) {
 // the heap lock. See mheap for details.
 //
 //go:systemstack
-func (h *mheap) freeManual(s *mspan, stat *uint64) {
+func (h *mheap) freeManual(s *mspan, typ spanAllocType) {
 	s.needzero = 1
 	lock(&h.lock)
-	mSysStatDec(stat, s.npages*pageSize)
-	mSysStatInc(&memstats.heap_sys, s.npages*pageSize)
-	h.freeSpanLocked(s, false, true)
+	h.freeSpanLocked(s, typ)
 	unlock(&h.lock)
 }
 
-func (h *mheap) freeSpanLocked(s *mspan, acctinuse, acctidle bool) {
+func (h *mheap) freeSpanLocked(s *mspan, typ spanAllocType) {
 	switch s.state.get() {
 	case mSpanManual:
 		if s.allocCount != 0 {
@@ -1368,12 +1397,21 @@ func (h *mheap) freeSpanLocked(s *mspan, acctinuse, acctidle bool) {
 		throw("mheap.freeSpanLocked - invalid span state")
 	}
 
-	if acctinuse {
+	// Update stats.
+	//
+	// Mirrors the code in allocSpan.
+	switch typ {
+	case spanAllocHeap:
 		mSysStatDec(&memstats.heap_inuse, s.npages*pageSize)
+	case spanAllocStack:
+		mSysStatDec(&memstats.stacks_inuse, s.npages*pageSize)
+	case spanAllocPtrScalarBits, spanAllocWorkBuf:
+		mSysStatDec(&memstats.gc_sys, s.npages*pageSize)
 	}
-	if acctidle {
-		mSysStatInc(&memstats.heap_idle, s.npages*pageSize)
+	if typ.manual() {
+		mSysStatInc(&memstats.heap_sys, s.npages*pageSize)
 	}
+	mSysStatInc(&memstats.heap_idle, s.npages*pageSize)
 
 	// Mark the space as free.
 	h.pages.free(s.base(), s.npages)
diff --git a/src/runtime/stack.go b/src/runtime/stack.go
index 2afc2635aa..7b9dce5393 100644
--- a/src/runtime/stack.go
+++ b/src/runtime/stack.go
@@ -187,7 +187,7 @@ func stackpoolalloc(order uint8) gclinkptr {
 	lockWithRankMayAcquire(&mheap_.lock, lockRankMheap)
 	if s == nil {
 		// no free stacks. Allocate another span worth.
-		s = mheap_.allocManual(_StackCacheSize>>_PageShift, &memstats.stacks_inuse)
+		s = mheap_.allocManual(_StackCacheSize>>_PageShift, spanAllocStack)
 		if s == nil {
 			throw("out of memory")
 		}
@@ -251,7 +251,7 @@ func stackpoolfree(x gclinkptr, order uint8) {
 		stackpool[order].item.span.remove(s)
 		s.manualFreeList = 0
 		osStackFree(s)
-		mheap_.freeManual(s, &memstats.stacks_inuse)
+		mheap_.freeManual(s, spanAllocStack)
 	}
 }
 
@@ -396,7 +396,7 @@ func stackalloc(n uint32) stack {
 
 		if s == nil {
 			// Allocate a new stack from the heap.
-			s = mheap_.allocManual(npage, &memstats.stacks_inuse)
+			s = mheap_.allocManual(npage, spanAllocStack)
 			if s == nil {
 				throw("out of memory")
 			}
@@ -480,7 +480,7 @@ func stackfree(stk stack) {
 			// Free the stack immediately if we're
 			// sweeping.
 			osStackFree(s)
-			mheap_.freeManual(s, &memstats.stacks_inuse)
+			mheap_.freeManual(s, spanAllocStack)
 		} else {
 			// If the GC is running, we can't return a
 			// stack span to the heap because it could be
@@ -1193,7 +1193,7 @@ func freeStackSpans() {
 			list.remove(s)
 			s.manualFreeList = 0
 			osStackFree(s)
-			mheap_.freeManual(s, &memstats.stacks_inuse)
+			mheap_.freeManual(s, spanAllocStack)
 		}
 		s = next
 	}
@@ -1207,7 +1207,7 @@ func freeStackSpans() {
 		next := s.next
 		stackLarge.free[i].remove(s)
 		osStackFree(s)
-		mheap_.freeManual(s, &memstats.stacks_inuse)
+		mheap_.freeManual(s, spanAllocStack)
 		s = next
 	}
 }
--
cgit v1.3
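As a standalone illustration of the allocation-type pattern introduced by
the second change, here is a simplified sketch of the idea outside the
runtime. The type and constant names mirror the runtime's, but the
statistic counters are hypothetical stand-ins for the memstats fields,
and the accounting function is a reduced model of what allocSpan and
freeSpanLocked do, not the runtime implementation itself:

    package main

    import "fmt"

    // spanAllocType: each caller of the span allocator declares what the
    // span is for; the allocator derives both the "manually managed"
    // property and which statistic to update from it.
    type spanAllocType uint8

    const (
        spanAllocHeap spanAllocType = iota // heap span
        spanAllocStack
        spanAllocPtrScalarBits
        spanAllocWorkBuf
    )

    // manual reports whether spans of this type are manually managed,
    // i.e. not swept by the garbage collector.
    func (t spanAllocType) manual() bool { return t != spanAllocHeap }

    // Hypothetical counters standing in for memstats fields.
    var heapInuse, stacksInuse, gcSys uint64

    // accountAlloc keeps the accounting in one switch instead of
    // threading an opaque *uint64 through every caller.
    func accountAlloc(typ spanAllocType, nbytes uint64) {
        switch typ {
        case spanAllocHeap:
            heapInuse += nbytes
        case spanAllocStack:
            stacksInuse += nbytes
        case spanAllocPtrScalarBits, spanAllocWorkBuf:
            gcSys += nbytes
        }
    }

    func main() {
        accountAlloc(spanAllocStack, 8192)
        accountAlloc(spanAllocWorkBuf, 2048)
        fmt.Println(heapInuse, stacksInuse, gcSys, spanAllocStack.manual())
    }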