aboutsummaryrefslogtreecommitdiff
path: root/src/runtime/malloc.go
diff options
context:
space:
mode:
authorMichael Anthony Knyszek <mknyszek@google.com>2021-10-28 17:52:22 +0000
committerMichael Knyszek <mknyszek@google.com>2021-10-29 17:44:15 +0000
commitd8fc7f785e84515c0a5beb8baeb3c1ca35bebf68 (patch)
tree9b0d95bbfcac52519ee27d7d46c6e1a56875572d /src/runtime/malloc.go
parenta88575d662a7e8e4fbb31bf139bcffc063e2a734 (diff)
downloadgo-d8fc7f785e84515c0a5beb8baeb3c1ca35bebf68.tar.xz
runtime: clean up allocation zeroing
Currently, the runtime zeroes allocations in several ways. First, small object spans are always zeroed if they come from mheap, and their slots are zeroed later in mallocgc if needed. Second, large object spans (objects that have their own spans) plumb the need for zeroing down into mheap. Thirdly, large objects that have no pointers have their zeroing delayed until after preemption is reenabled, but before returning in mallocgc. All of this has two consequences: 1. Spans for small objects that come from mheap are sometimes unnecessarily zeroed, even if the mallocgc call that created them doesn't need the object slot to be zeroed. 2. This is all messy and difficult to reason about. This CL simplifies this code, resolving both (1) and (2). First, it recognizes that zeroing in mheap is unnecessary for small object spans; mallocgc and its callees in mcache and mcentral, by design, are *always* able to deal with non-zeroed spans. They must, for they deal with recycled spans all the time. Once this fact is made clear, the only remaining use of zeroing in mheap is for large objects. As a result, this CL lifts mheap zeroing for large objects into mallocgc, to parallel all the other codepaths in mallocgc. This makes the large object allocation code less surprising. Next, this CL sets the flag for the delayed zeroing explicitly in the one case where it matters, and inverts and renames the flag from isZeroed to delayedZeroing. Finally, it adds a check to make sure that only pointer-free allocations take the delayed zeroing codepath, as an extra safety measure. Benchmark results: https://perf.golang.org/search?q=upload:20211028.8 Inspired by tapir.liu@gmail.com's CL 343470. Change-Id: I7e1296adc19ce8a02c8d93a0a5082aefb2673e8f Reviewed-on: https://go-review.googlesource.com/c/go/+/359477 Trust: Michael Knyszek <mknyszek@google.com> Reviewed-by: David Chase <drchase@google.com>
Diffstat (limited to 'src/runtime/malloc.go')
-rw-r--r--src/runtime/malloc.go25
1 files changed, 20 insertions, 5 deletions
diff --git a/src/runtime/malloc.go b/src/runtime/malloc.go
index c389cb1e45..8af1d96f1a 100644
--- a/src/runtime/malloc.go
+++ b/src/runtime/malloc.go
@@ -980,8 +980,8 @@ func mallocgc(size uintptr, typ *_type, needzero bool) unsafe.Pointer {
var x unsafe.Pointer
noscan := typ == nil || typ.ptrdata == 0
// In some cases block zeroing can profitably (for latency reduction purposes)
- // be delayed till preemption is possible; isZeroed tracks that state.
- isZeroed := true
+ // be delayed till preemption is possible; delayedZeroing tracks that state.
+ delayedZeroing := false
if size <= maxSmallSize {
if noscan && size < maxTinySize {
// Tiny allocator.
@@ -1079,11 +1079,23 @@ func mallocgc(size uintptr, typ *_type, needzero bool) unsafe.Pointer {
shouldhelpgc = true
// For large allocations, keep track of zeroed state so that
// bulk zeroing can be happen later in a preemptible context.
- span, isZeroed = c.allocLarge(size, needzero && !noscan, noscan)
+ span = c.allocLarge(size, noscan)
span.freeindex = 1
span.allocCount = 1
- x = unsafe.Pointer(span.base())
size = span.elemsize
+ x = unsafe.Pointer(span.base())
+ if needzero && span.needzero != 0 {
+ if noscan {
+ delayedZeroing = true
+ } else {
+ memclrNoHeapPointers(x, size)
+ // We've in theory cleared almost the whole span here,
+ // and could take the extra step of actually clearing
+ // the whole thing. However, don't. Any GC bits for the
+ // uncleared parts will be zero, and it's just going to
+ // be needzero = 1 once freed anyway.
+ }
+ }
}
var scanSize uintptr
@@ -1139,7 +1151,10 @@ func mallocgc(size uintptr, typ *_type, needzero bool) unsafe.Pointer {
// Pointerfree data can be zeroed late in a context where preemption can occur.
// x will keep the memory alive.
- if !isZeroed && needzero {
+ if delayedZeroing {
+ if !noscan {
+ throw("delayed zeroing on data that may contain pointers")
+ }
memclrNoHeapPointersChunked(size, x) // This is a possible preemption point: see #47302
}