aboutsummaryrefslogtreecommitdiff
path: root/src/runtime
diff options
context:
space:
mode:
Diffstat (limited to 'src/runtime')
-rw-r--r--src/runtime/export_test.go20
-rw-r--r--src/runtime/malloc.go54
-rw-r--r--src/runtime/mem.go113
3 files changed, 119 insertions, 68 deletions
diff --git a/src/runtime/export_test.go b/src/runtime/export_test.go
index bc471e50a0..931ec7e540 100644
--- a/src/runtime/export_test.go
+++ b/src/runtime/export_test.go
@@ -550,7 +550,7 @@ func MapNextArenaHint() (start, end uintptr, ok bool) {
if !ok {
// We were unable to get the requested reservation.
// Release what we did get and fail.
- sysFreeOS(got, physPageSize)
+ sysUnreserve(got, physPageSize)
}
return
}
@@ -1091,19 +1091,21 @@ func FreePageAlloc(pp *PageAlloc) {
// Free all the mapped space for the summary levels.
if pageAlloc64Bit != 0 {
for l := 0; l < summaryLevels; l++ {
- sysFreeOS(unsafe.Pointer(&p.summary[l][0]), uintptr(cap(p.summary[l]))*pallocSumBytes)
+ // This isn't quite right, as some of this memory may
+ // be Ready instead of Reserved. The mappedReady and
+ // testSysStat adjustments below correct for the
+ // difference.
+ sysUnreserve(unsafe.Pointer(&p.summary[l][0]), uintptr(cap(p.summary[l]))*pallocSumBytes)
}
} else {
resSize := uintptr(0)
for _, s := range p.summary {
resSize += uintptr(cap(s)) * pallocSumBytes
}
- sysFreeOS(unsafe.Pointer(&p.summary[0][0]), alignUp(resSize, physPageSize))
+ // See sysUnreserve comment above.
+ sysUnreserve(unsafe.Pointer(&p.summary[0][0]), alignUp(resSize, physPageSize))
}
- // Free extra data structures.
- sysFreeOS(unsafe.Pointer(&p.scav.index.chunks[0]), uintptr(cap(p.scav.index.chunks))*unsafe.Sizeof(atomicScavChunkData{}))
-
// Subtract back out whatever we mapped for the summaries.
// sysUsed adds to p.sysStat and memstats.mappedReady no matter what
// (and in anger should actually be accounted for), and there's no other
@@ -1111,6 +1113,12 @@ func FreePageAlloc(pp *PageAlloc) {
gcController.mappedReady.Add(-int64(p.summaryMappedReady))
testSysStat.add(-int64(p.summaryMappedReady))
+ // Free extra data structures.
+ //
+ // TODO(prattmic): As above, some of this may be Ready, so we should
+ // manually adjust mappedReady and testSysStat?
+ sysUnreserve(unsafe.Pointer(&p.scav.index.chunks[0]), uintptr(cap(p.scav.index.chunks))*unsafe.Sizeof(atomicScavChunkData{}))
+
// Free the mapped space for chunks.
for i := range p.chunks {
if x := p.chunks[i]; x != nil {
diff --git a/src/runtime/malloc.go b/src/runtime/malloc.go
index c08bc7574b..6a4ee5bd42 100644
--- a/src/runtime/malloc.go
+++ b/src/runtime/malloc.go
@@ -788,7 +788,7 @@ func (h *mheap) sysAlloc(n uintptr, hintList **arenaHint, arenaList *[]arenaIdx)
// particular, this is already how Windows behaves, so
// it would simplify things there.
if v != nil {
- sysFreeOS(v, n)
+ sysUnreserve(v, n)
}
*hintList = hint.next
h.arenaHintAlloc.free(unsafe.Pointer(hint))
@@ -921,58 +921,6 @@ mapped:
return
}
-// sysReserveAligned is like sysReserve, but the returned pointer is
-// aligned to align bytes. It may reserve either n or n+align bytes,
-// so it returns the size that was reserved.
-func sysReserveAligned(v unsafe.Pointer, size, align uintptr, vmaName string) (unsafe.Pointer, uintptr) {
- if isSbrkPlatform {
- if v != nil {
- throw("unexpected heap arena hint on sbrk platform")
- }
- return sysReserveAlignedSbrk(size, align)
- }
- // Since the alignment is rather large in uses of this
- // function, we're not likely to get it by chance, so we ask
- // for a larger region and remove the parts we don't need.
- retries := 0
-retry:
- p := uintptr(sysReserve(v, size+align, vmaName))
- switch {
- case p == 0:
- return nil, 0
- case p&(align-1) == 0:
- return unsafe.Pointer(p), size + align
- case GOOS == "windows":
- // On Windows we can't release pieces of a
- // reservation, so we release the whole thing and
- // re-reserve the aligned sub-region. This may race,
- // so we may have to try again.
- sysFreeOS(unsafe.Pointer(p), size+align)
- p = alignUp(p, align)
- p2 := sysReserve(unsafe.Pointer(p), size, vmaName)
- if p != uintptr(p2) {
- // Must have raced. Try again.
- sysFreeOS(p2, size)
- if retries++; retries == 100 {
- throw("failed to allocate aligned heap memory; too many retries")
- }
- goto retry
- }
- // Success.
- return p2, size
- default:
- // Trim off the unaligned parts.
- pAligned := alignUp(p, align)
- sysFreeOS(unsafe.Pointer(p), pAligned-p)
- end := pAligned + size
- endLen := (p + size + align) - end
- if endLen > 0 {
- sysFreeOS(unsafe.Pointer(end), endLen)
- }
- return unsafe.Pointer(pAligned), size
- }
-}
-
// enableMetadataHugePages enables huge pages for various sources of heap metadata.
//
// A note on latency: for sufficiently small heaps (<10s of GiB) this function will take constant
diff --git a/src/runtime/mem.go b/src/runtime/mem.go
index 077c5f112b..7d6c6f65c7 100644
--- a/src/runtime/mem.go
+++ b/src/runtime/mem.go
@@ -116,14 +116,21 @@ func sysHugePageCollapse(v unsafe.Pointer, n uintptr) {
//
// sysStat must be non-nil.
//
+// The size and start address must exactly match the size and returned address
+// from the original sysAlloc/sysReserve/sysReserveAligned call. That is,
+// sysFree cannot be used to free a subset of a memory region.
+//
// Don't split the stack as this function may be invoked without a valid G,
// which prevents us from allocating more stack.
//
//go:nosplit
func sysFree(v unsafe.Pointer, n uintptr, sysStat *sysMemStat) {
- // When using ASAN leak detection, the memory being freed is
- // known by the sanitizer. We need to unregister it so it's
- // not accessed by it.
+ // When using ASAN leak detection, the memory being freed is known by
+ // the sanitizer. We need to unregister it so it's not accessed by it.
+ //
+ // lsanunregisterrootregion matches regions by start address and size,
+ // so it is not possible to unregister a subset of the region. This is
+ // why sysFree requires the full region from the initial allocation.
if asanenabled {
lsanunregisterrootregion(v, n)
}
@@ -152,13 +159,12 @@ func sysFault(v unsafe.Pointer, n uintptr) {
// (either via permissions or not committing the memory). Such a reservation is
// thus never backed by physical memory.
//
-// If the pointer passed to it is non-nil, the caller wants the
-// reservation there, but sysReserve can still choose another
-// location if that one is unavailable.
+// If the pointer passed to it is non-nil, the caller wants the reservation
+// there, but sysReserve can still choose another location if that one is
+// unavailable.
//
-// NOTE: sysReserve returns OS-aligned memory, but the heap allocator
-// may use larger alignment, so the caller must be careful to realign the
-// memory obtained by sysReserve.
+// sysReserve returns OS-aligned memory. If a larger alignment is required, use
+// sysReserveAligned.
func sysReserve(v unsafe.Pointer, n uintptr, vmaName string) unsafe.Pointer {
p := sysReserveOS(v, n, vmaName)
@@ -171,6 +177,95 @@ func sysReserve(v unsafe.Pointer, n uintptr, vmaName string) unsafe.Pointer {
return p
}
+// sysReserveAligned transitions a memory region from None to Reserved.
+//
+// Semantics are equivalent to sysReserve, but the returned pointer is aligned
+// to align bytes. It may reserve either n or n+align bytes, so it returns the
+// size that was reserved.
+func sysReserveAligned(v unsafe.Pointer, size, align uintptr, vmaName string) (unsafe.Pointer, uintptr) {
+ if isSbrkPlatform {
+ if v != nil {
+ throw("unexpected heap arena hint on sbrk platform")
+ }
+ return sysReserveAlignedSbrk(size, align)
+ }
+ // Since the alignment is rather large in uses of this
+ // function, we're not likely to get it by chance, so we ask
+ // for a larger region and remove the parts we don't need.
+ retries := 0
+retry:
+ p := uintptr(sysReserve(v, size+align, vmaName))
+ switch {
+ case p == 0:
+ return nil, 0
+ case p&(align-1) == 0:
+ return unsafe.Pointer(p), size + align
+ case GOOS == "windows":
+ // On Windows we can't release pieces of a
+ // reservation, so we release the whole thing and
+ // re-reserve the aligned sub-region. This may race,
+ // so we may have to try again.
+ sysUnreserve(unsafe.Pointer(p), size+align)
+ p = alignUp(p, align)
+ p2 := sysReserve(unsafe.Pointer(p), size, vmaName)
+ if p != uintptr(p2) {
+ // Must have raced. Try again.
+ sysUnreserve(p2, size)
+ if retries++; retries == 100 {
+ throw("failed to allocate aligned heap memory; too many retries")
+ }
+ goto retry
+ }
+ // Success.
+ return p2, size
+ default:
+ // Trim off the unaligned parts.
+ pAligned := alignUp(p, align)
+ end := pAligned + size
+ endLen := (p + size + align) - end
+
+		// sysUnreserve does not allow unreserving a subset of the
+		// region because LSAN does not allow unregistering a subset.
+		// So we can't call sysUnreserve. Instead we simply unregister
+		// the entire region from LSAN and re-register with the smaller
+		// region before freeing the unnecessary portions, which does
+		// allow subsets of the region.
+ if asanenabled {
+ lsanunregisterrootregion(unsafe.Pointer(p), size+align)
+ lsanregisterrootregion(unsafe.Pointer(pAligned), size)
+ }
+ sysFreeOS(unsafe.Pointer(p), pAligned-p)
+ if endLen > 0 {
+ sysFreeOS(unsafe.Pointer(end), endLen)
+ }
+ return unsafe.Pointer(pAligned), size
+ }
+}
+
+// sysUnreserve transitions a memory region from Reserved to None.
+//
+// The size and start address must exactly match the size and returned address
+// from sysReserve/sysReserveAligned. That is, sysUnreserve cannot be used to
+// unreserve a subset of a memory region.
+//
+// Don't split the stack as this function may be invoked without a valid G,
+// which prevents us from allocating more stack.
+//
+//go:nosplit
+func sysUnreserve(v unsafe.Pointer, n uintptr) {
+ // When using ASAN leak detection, the memory being freed is known by
+ // the sanitizer. We need to unregister it so it's not accessed by it.
+ //
+ // lsanunregisterrootregion matches regions by start address and size,
+ // so it is not possible to unregister a subset of the region. This is
+ // why sysUnreserve requires the full region from sysReserve.
+ if asanenabled {
+ lsanunregisterrootregion(v, n)
+ }
+
+ sysFreeOS(v, n)
+}
+
// sysMap transitions a memory region from Reserved to Prepared. It ensures the
// memory region can be efficiently transitioned to Ready.
//