Diffstat (limited to 'src/runtime/mpagealloc_64bit.go')
-rw-r--r--  src/runtime/mpagealloc_64bit.go  79
1 file changed, 78 insertions(+), 1 deletion(-)
diff --git a/src/runtime/mpagealloc_64bit.go b/src/runtime/mpagealloc_64bit.go
index 0b99209d99..bfc3e0ad90 100644
--- a/src/runtime/mpagealloc_64bit.go
+++ b/src/runtime/mpagealloc_64bit.go
@@ -6,7 +6,10 @@
package runtime
-import "unsafe"
+import (
+ "runtime/internal/atomic"
+ "unsafe"
+)
const (
// The number of levels in the radix tree.
@@ -83,6 +86,12 @@ func (p *pageAlloc) sysInit() {
sl := notInHeapSlice{(*notInHeap)(r), 0, entries}
p.summary[l] = *(*[]pallocSum)(unsafe.Pointer(&sl))
}
+
+ // Set up the scavenge index.
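+ // The index needs one bit per pallocChunkBytes chunk of the full
+ // address space, packed eight chunks to a byte.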
+ nbytes := uintptr(1<<heapAddrBits) / pallocChunkBytes / 8
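+ // Only reserve the space here; grow maps and commits pieces of it
+ // as the heap grows.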
+ r := sysReserve(nil, nbytes)
+ sl := notInHeapSlice{(*notInHeap)(r), int(nbytes), int(nbytes)}
+ p.scav.index.chunks = *(*[]atomic.Uint8)(unsafe.Pointer(&sl))
}
// sysGrow performs architecture-dependent operations on heap
@@ -177,4 +186,72 @@ func (p *pageAlloc) sysGrow(base, limit uintptr) {
sysUsed(unsafe.Pointer(need.base.addr()), need.size(), need.size())
p.summaryMappedReady += need.size()
}
+
+ // Update the scavenge index.
+ p.summaryMappedReady += p.scav.index.grow(base, limit, p.sysStat)
+}
+
+// grow increases the index's backing store in response to a heap growth.
+//
+// Returns the amount of memory added to sysStat.
+func (s *scavengeIndex) grow(base, limit uintptr, sysStat *sysMemStat) uintptr {
+ if base%pallocChunkBytes != 0 || limit%pallocChunkBytes != 0 {
+ print("runtime: base = ", hex(base), ", limit = ", hex(limit), "\n")
+ throw("sysGrow bounds not aligned to pallocChunkBytes")
+ }
+ // Map and commit the pieces of chunks that we need.
+ //
+ // We always map the full range of the minimum heap address to the
+ // maximum heap address. We don't do this for the summary structure
+ // because it's quite large and a discontiguous heap could cause a
+ // lot of memory to be used. In this situation, the worst case overhead
+ // is in the single-digit MiB if we map the whole thing.
+ //
+ // The base address of the backing store is always page-aligned,
+ // because it comes from the OS, so it's sufficient to align the
+ // index.
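+ //
+ // chunkIndex(x)/8 turns a chunk index into a byte offset into the
+ // index (one bit per chunk); the +7 rounds the limit up so a
+ // partially covered byte is still included.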
+ haveMin := s.min.Load()
+ haveMax := s.max.Load()
+ needMin := int32(alignDown(uintptr(chunkIndex(base)/8), physPageSize))
+ needMax := int32(alignUp(uintptr((chunkIndex(limit)+7)/8), physPageSize))
+ // Extend the range down to what we have, if there's no overlap.
+ if needMax < haveMin {
+ needMax = haveMin
+ }
+ if needMin > haveMax {
+ needMin = haveMax
+ }
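+ // The mapped region now stays contiguous: min and max always bound
+ // a single range with no holes.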
+ have := makeAddrRange(
+ // Avoid a panic from indexing one past the last element.
+ uintptr(unsafe.Pointer(&s.chunks[0]))+uintptr(haveMin),
+ uintptr(unsafe.Pointer(&s.chunks[0]))+uintptr(haveMax),
+ )
+ need := makeAddrRange(
+ // Avoid a panic from indexing one past the last element.
+ uintptr(unsafe.Pointer(&s.chunks[0]))+uintptr(needMin),
+ uintptr(unsafe.Pointer(&s.chunks[0]))+uintptr(needMax),
+ )
+ // Subtract any overlap from rounding. We can't re-map memory because
+ // it'll be zeroed.
+ need = need.subtract(have)
+
+ // If we've got something to map, map it, and update the slice bounds.
+ if need.size() != 0 {
+ sysMap(unsafe.Pointer(need.base.addr()), need.size(), sysStat)
+ sysUsed(unsafe.Pointer(need.base.addr()), need.size(), need.size())
+ // Update the indices only after the new memory is valid.
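+ // A value of zero for min or max means nothing is mapped yet.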
+ if haveMin == 0 || needMin < haveMin {
+ s.min.Store(needMin)
+ }
+ if haveMax == 0 || needMax > haveMax {
+ s.max.Store(needMax)
+ }
+ }
+ // Update minHeapIdx. Note that even if there's no mapping work to do,
+ // we may still have a new, lower minimum heap address.
+ minHeapIdx := s.minHeapIdx.Load()
+ if baseIdx := int32(chunkIndex(base) / 8); minHeapIdx == 0 || baseIdx < minHeapIdx {
+ s.minHeapIdx.Store(baseIdx)
+ }
+ return need.size()
}
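
As a rough illustration of the arithmetic above, here is a minimal standalone sketch. heapAddrBits, pallocChunkBytes, physPageSize, chunkIndex, alignDown, and alignUp are simplified stand-ins using typical 64-bit linux/amd64 values, not the runtime's actual declarations:

package main

import "fmt"

// Stand-in constants mirroring a typical linux/amd64 configuration
// (assumed values, not the runtime's actual declarations).
const (
	heapAddrBits     = 48
	pallocChunkBytes = 4 << 20 // 4 MiB per chunk
	physPageSize     = 4 << 10 // 4 KiB pages
)

func alignDown(x, align uintptr) uintptr { return x &^ (align - 1) }
func alignUp(x, align uintptr) uintptr   { return (x + align - 1) &^ (align - 1) }
func chunkIndex(addr uintptr) uintptr    { return addr / pallocChunkBytes }

func main() {
	// sysInit: one bit per chunk over the whole address space,
	// eight bits per byte.
	nbytes := uintptr(1) << heapAddrBits / pallocChunkBytes / 8
	fmt.Println(nbytes) // 8388608 (8 MiB)

	// grow: the byte range of the index to map for [base, limit),
	// aligned out to whole physical pages.
	base := uintptr(0xc000000000)
	limit := base + 64*pallocChunkBytes
	needMin := alignDown(chunkIndex(base)/8, physPageSize)
	needMax := alignUp((chunkIndex(limit)+7)/8, physPageSize)
	fmt.Println(needMin, needMax) // 24576 28672
}

With these assumed values the full index reserves 8 MiB up front, and growing the heap by 64 chunks maps in just one page-aligned 4 KiB slice of it.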