about summary refs log tree commit diff
path: root/src/runtime/malloc.go
diff options
context:
space:
mode:
Diffstat (limited to 'src/runtime/malloc.go')
-rw-r--r--  src/runtime/malloc.go  37
1 file changed, 26 insertions(+), 11 deletions(-)
diff --git a/src/runtime/malloc.go b/src/runtime/malloc.go
index c7ab928fe6..ce03114edc 100644
--- a/src/runtime/malloc.go
+++ b/src/runtime/malloc.go
@@ -1221,12 +1221,7 @@ func mallocgc(size uintptr, typ *_type, needzero bool) unsafe.Pointer {
// This may be racing with GC so do it atomically if there can be
// a race marking the bit.
if gcphase != _GCoff {
- // Pass the full size of the allocation to the number of bytes
- // marked.
- //
- // If !goexperiment.AllocHeaders, "size" doesn't include the
- // allocation header, so use span.elemsize unconditionally.
- gcmarknewobject(span, uintptr(x), span.elemsize)
+ gcmarknewobject(span, uintptr(x))
}
if raceenabled {
@@ -1248,12 +1243,28 @@ func mallocgc(size uintptr, typ *_type, needzero bool) unsafe.Pointer {
asanunpoison(x, userSize)
}
+ // If !goexperiment.AllocHeaders, "size" doesn't include the
+ // allocation header, so use span.elemsize as the "full" size
+ // for various computations below.
+ //
+ // TODO(mknyszek): We should really count the header as part
+ // of gc_sys or something, but it's risky to change the
+ // accounting so much right now. Just pretend its internal
+ // fragmentation and match the GC's accounting by using the
+ // whole allocation slot.
+ fullSize := size
+ if goexperiment.AllocHeaders {
+ fullSize = span.elemsize
+ }
if rate := MemProfileRate; rate > 0 {
// Note cache c only valid while m acquired; see #47302
- if rate != 1 && size < c.nextSample {
- c.nextSample -= size
+ //
+ // N.B. Use the full size because that matches how the GC
+ // will update the mem profile on the "free" side.
+ if rate != 1 && fullSize < c.nextSample {
+ c.nextSample -= fullSize
} else {
- profilealloc(mp, x, size)
+ profilealloc(mp, x, fullSize)
}
}
mp.mallocing = 0
@@ -1268,6 +1279,7 @@ func mallocgc(size uintptr, typ *_type, needzero bool) unsafe.Pointer {
if goexperiment.AllocHeaders && header != nil {
throw("unexpected malloc header in delayed zeroing of large object")
}
+ // N.B. size == fullSize always in this case.
memclrNoHeapPointersChunked(size, x) // This is a possible preemption point: see #47302
}
@@ -1278,14 +1290,17 @@ func mallocgc(size uintptr, typ *_type, needzero bool) unsafe.Pointer {
if inittrace.active && inittrace.id == getg().goid {
// Init functions are executed sequentially in a single goroutine.
- inittrace.bytes += uint64(size)
+ inittrace.bytes += uint64(fullSize)
}
}
if assistG != nil {
// Account for internal fragmentation in the assist
// debt now that we know it.
- assistG.gcAssistBytes -= int64(size - dataSize)
+ //
+ // N.B. Use the full size because that's how the rest
+ // of the GC accounts for bytes marked.
+ assistG.gcAssistBytes -= int64(fullSize - dataSize)
}
if shouldhelpgc {