aboutsummaryrefslogtreecommitdiff
path: root/src/runtime/malloc.go
diff options
context:
space:
mode:
authorMichael Anthony Knyszek <mknyszek@google.com>2024-10-25 18:38:57 +0000
committerMichael Knyszek <mknyszek@google.com>2024-10-25 20:35:44 +0000
commit4bf98186b5e012d56ec2944a41d55178c3dea905 (patch)
treef3a8890f7d1286a2edbeb9b85193fd14274cecf1 /src/runtime/malloc.go
parent3320ce94b669c083e7a94a36dd5d5bceb6c0df0e (diff)
downloadgo-4bf98186b5e012d56ec2944a41d55178c3dea905.tar.xz
runtime: fix mallocgc for asan
This change finally fully fixes mallocgc for asan after the recent refactoring. Here is everything that changed:

- Fix the accounting for the alloc header; large objects don't have them.

- Mask out extra bits set from unrolling the bitmap for slice backing stores in writeHeapBitsSmall. The redzone in asan mode makes it so that dataSize is no longer an exact multiple of typ.Size_ in this case (a new assumption I have recently discovered) but we didn't mask out any extra bits, so we'd accidentally set bits in other allocations. Oops.

- Move the initHeapBits optimization for the 8-byte scan sizeclass on 64-bit platforms up to mallocgc, out from writeHeapBitsSmall. So, this actually caused a problem with asan when the optimization first landed, but we missed it. The issue was then masked once we started passing the redzone down into writeHeapBitsSmall, since the optimization would no longer erroneously fire on asan. What happened was that dataSize would be 8 (because that was the user-provided alloc size) so we'd skip writing heap bits, but it would turn out the redzone bumped the size class, so we'd actually *have* to write the heap bits for that size class. This is not really a problem now *but* it caused problems for me when debugging, since I would try to remove the red zone from dataSize and this would trigger this bug again. Ultimately, this whole situation is confusing because the check in writeHeapBitsSmall is *not* the same as the check in initHeapBits. By moving this check up to mallocgc, we can make the checks align better by matching on the sizeclass, so this should be less error-prone in the future.

Change-Id: I1e9819223be23f722f3bf21e63e812f5fb557194
Reviewed-on: https://go-review.googlesource.com/c/go/+/622041
Reviewed-by: Keith Randall <khr@golang.org>
Reviewed-by: Keith Randall <khr@google.com>
LUCI-TryBot-Result: Go LUCI <golang-scoped@luci-project-accounts.iam.gserviceaccount.com>
Diffstat (limited to 'src/runtime/malloc.go')
-rw-r--r--src/runtime/malloc.go18
1 file changed, 12 insertions(+), 6 deletions(-)
diff --git a/src/runtime/malloc.go b/src/runtime/malloc.go
index 0700d0d1cd..0605921652 100644
--- a/src/runtime/malloc.go
+++ b/src/runtime/malloc.go
@@ -1035,7 +1035,7 @@ func mallocgc(size uintptr, typ *_type, needzero bool) unsafe.Pointer {
// These "redzones" are marked as unaddressable.
var asanRZ uintptr
if asanenabled {
- asanRZ = computeRZlog(size)
+ asanRZ = redZoneSize(size)
size += asanRZ
}
@@ -1074,10 +1074,10 @@ func mallocgc(size uintptr, typ *_type, needzero bool) unsafe.Pointer {
// Poison the space between the end of the requested size of x
// and the end of the slot. Unpoison the requested allocation.
frag := elemsize - size
- if typ != nil && typ.Pointers() && !heapBitsInSpan(elemsize) {
+ if typ != nil && typ.Pointers() && !heapBitsInSpan(elemsize) && size <= maxSmallSize-mallocHeaderSize {
frag -= mallocHeaderSize
}
- asanpoison(unsafe.Add(x, size-asanRZ), asanRZ+frag)
+ asanpoison(unsafe.Add(x, size-asanRZ), asanRZ)
asanunpoison(x, size-asanRZ)
}
@@ -1369,7 +1369,13 @@ func mallocgcSmallScanNoHeader(size uintptr, typ *_type, needzero bool) (unsafe.
if needzero && span.needzero != 0 {
memclrNoHeapPointers(x, size)
}
- c.scanAlloc += heapSetTypeNoHeader(uintptr(x), size, typ, span)
+ if goarch.PtrSize == 8 && sizeclass == 1 {
+ // initHeapBits already set the pointer bits for the 8-byte sizeclass
+ // on 64-bit platforms.
+ c.scanAlloc += 8
+ } else {
+ c.scanAlloc += heapSetTypeNoHeader(uintptr(x), size, typ, span)
+ }
size = uintptr(class_to_size[sizeclass])
// Ensure that the stores above that initialize x to
@@ -2040,9 +2046,9 @@ func (p *notInHeap) add(bytes uintptr) *notInHeap {
return (*notInHeap)(unsafe.Pointer(uintptr(unsafe.Pointer(p)) + bytes))
}
-// computeRZlog computes the size of the redzone.
+// redZoneSize computes the size of the redzone for a given allocation.
// Refer to the implementation of the compiler-rt.
-func computeRZlog(userSize uintptr) uintptr {
+func redZoneSize(userSize uintptr) uintptr {
switch {
case userSize <= (64 - 16):
return 16 << 0