about summary refs log tree commit diff
path: root/src/runtime
diff options
context:
space:
mode:
Diffstat (limited to 'src/runtime')
-rw-r--r--  src/runtime/arena.go    2
-rw-r--r--  src/runtime/malloc.go   18
-rw-r--r--  src/runtime/mbitmap.go  16
3 files changed, 24 insertions, 12 deletions
diff --git a/src/runtime/arena.go b/src/runtime/arena.go
index 3ad28533b5..ff59014a8a 100644
--- a/src/runtime/arena.go
+++ b/src/runtime/arena.go
@@ -798,7 +798,7 @@ func newUserArenaChunk() (unsafe.Pointer, *mspan) {
if asanenabled {
// TODO(mknyszek): Track individual objects.
- rzSize := computeRZlog(span.elemsize)
+ rzSize := redZoneSize(span.elemsize)
span.elemsize -= rzSize
span.largeType.Size_ = span.elemsize
rzStart := span.base() + span.elemsize
diff --git a/src/runtime/malloc.go b/src/runtime/malloc.go
index 0700d0d1cd..0605921652 100644
--- a/src/runtime/malloc.go
+++ b/src/runtime/malloc.go
@@ -1035,7 +1035,7 @@ func mallocgc(size uintptr, typ *_type, needzero bool) unsafe.Pointer {
// These "redzones" are marked as unaddressable.
var asanRZ uintptr
if asanenabled {
- asanRZ = computeRZlog(size)
+ asanRZ = redZoneSize(size)
size += asanRZ
}
@@ -1074,10 +1074,10 @@ func mallocgc(size uintptr, typ *_type, needzero bool) unsafe.Pointer {
// Poison the space between the end of the requested size of x
// and the end of the slot. Unpoison the requested allocation.
frag := elemsize - size
- if typ != nil && typ.Pointers() && !heapBitsInSpan(elemsize) {
+ if typ != nil && typ.Pointers() && !heapBitsInSpan(elemsize) && size <= maxSmallSize-mallocHeaderSize {
frag -= mallocHeaderSize
}
- asanpoison(unsafe.Add(x, size-asanRZ), asanRZ+frag)
+ asanpoison(unsafe.Add(x, size-asanRZ), asanRZ)
asanunpoison(x, size-asanRZ)
}
@@ -1369,7 +1369,13 @@ func mallocgcSmallScanNoHeader(size uintptr, typ *_type, needzero bool) (unsafe.
if needzero && span.needzero != 0 {
memclrNoHeapPointers(x, size)
}
- c.scanAlloc += heapSetTypeNoHeader(uintptr(x), size, typ, span)
+ if goarch.PtrSize == 8 && sizeclass == 1 {
+ // initHeapBits already set the pointer bits for the 8-byte sizeclass
+ // on 64-bit platforms.
+ c.scanAlloc += 8
+ } else {
+ c.scanAlloc += heapSetTypeNoHeader(uintptr(x), size, typ, span)
+ }
size = uintptr(class_to_size[sizeclass])
// Ensure that the stores above that initialize x to
@@ -2040,9 +2046,9 @@ func (p *notInHeap) add(bytes uintptr) *notInHeap {
return (*notInHeap)(unsafe.Pointer(uintptr(unsafe.Pointer(p)) + bytes))
}
-// computeRZlog computes the size of the redzone.
+// redZoneSize computes the size of the redzone for a given allocation.
// Refer to the implementation of the compiler-rt.
-func computeRZlog(userSize uintptr) uintptr {
+func redZoneSize(userSize uintptr) uintptr {
switch {
case userSize <= (64 - 16):
return 16 << 0
diff --git a/src/runtime/mbitmap.go b/src/runtime/mbitmap.go
index 855acbdaa3..ed5b3e977c 100644
--- a/src/runtime/mbitmap.go
+++ b/src/runtime/mbitmap.go
@@ -640,11 +640,6 @@ func (span *mspan) heapBitsSmallForAddr(addr uintptr) uintptr {
//
//go:nosplit
func (span *mspan) writeHeapBitsSmall(x, dataSize uintptr, typ *_type) (scanSize uintptr) {
- if goarch.PtrSize == 8 && dataSize == goarch.PtrSize {
- // Already set by initHeapBits.
- return
- }
-
// The objects here are always really small, so a single load is sufficient.
src0 := readUintptr(typ.GCData)
@@ -654,10 +649,21 @@ func (span *mspan) writeHeapBitsSmall(x, dataSize uintptr, typ *_type) (scanSize
if typ.Size_ == goarch.PtrSize {
src = (1 << (dataSize / goarch.PtrSize)) - 1
} else {
+ // N.B. We rely on dataSize being an exact multiple of the type size.
+ // The alternative is to be defensive and mask out src to the length
+ // of dataSize. The purpose is to save on one additional masking operation.
+ if doubleCheckHeapSetType && !asanenabled && dataSize%typ.Size_ != 0 {
+ throw("runtime: (*mspan).writeHeapBitsSmall: dataSize is not a multiple of typ.Size_")
+ }
for i := typ.Size_; i < dataSize; i += typ.Size_ {
src |= src0 << (i / goarch.PtrSize)
scanSize += typ.Size_
}
+ if asanenabled {
+ // Mask src down to dataSize. dataSize is going to be a strange size because of
+ // the redzone required for allocations when asan is enabled.
+ src &= (1 << (dataSize / goarch.PtrSize)) - 1
+ }
}
// Since we're never writing more than one uintptr's worth of bits, we're either going