aboutsummaryrefslogtreecommitdiff
path: root/src/runtime/mpagealloc_32bit.go
diff options
context:
space:
mode:
authorMichael Anthony Knyszek <mknyszek@google.com>2023-04-20 02:41:08 +0000
committerGopher Robot <gobot@golang.org>2023-04-20 20:08:25 +0000
commit15c12762466b4c5f92b1668f86f73d0b1e66b62b (patch)
treece0b27ce37f73cc1de9dd21fbeb26d1a937a0496 /src/runtime/mpagealloc_32bit.go
parentbdccb85f509d24789694df198fe7bde948aa7955 (diff)
downloadgo-15c12762466b4c5f92b1668f86f73d0b1e66b62b.tar.xz
runtime: bring back minHeapIdx in scavenge index
The scavenge index currently doesn't guard against overflow, and CL 436395 removed the minHeapIdx optimization that allows the chunk scan to skip scanning chunks that haven't been mapped for the heap, and are only available as a consequence of chunks' mapped region being rounded out to a page on both ends. Because the 0'th chunk is never mapped, minHeapIdx effectively prevents overflow, fixing the iOS breakage. This change also refactors growth and initialization a little bit to decouple it from pageAlloc a bit and share code across platforms. Change-Id: If7fc3245aa81cf99451bf8468458da31986a9b0a Reviewed-on: https://go-review.googlesource.com/c/go/+/486695 Auto-Submit: Michael Knyszek <mknyszek@google.com> Reviewed-by: Michael Pratt <mpratt@google.com> TryBot-Result: Gopher Robot <gobot@golang.org> Run-TryBot: Michael Knyszek <mknyszek@google.com>
Diffstat (limited to 'src/runtime/mpagealloc_32bit.go')
-rw-r--r--src/runtime/mpagealloc_32bit.go35
1 file changed, 23 insertions, 12 deletions
diff --git a/src/runtime/mpagealloc_32bit.go b/src/runtime/mpagealloc_32bit.go
index 03990e47cf..900146e363 100644
--- a/src/runtime/mpagealloc_32bit.go
+++ b/src/runtime/mpagealloc_32bit.go
@@ -93,18 +93,6 @@ func (p *pageAlloc) sysInit(test bool) {
reservation = add(reservation, uintptr(entries)*pallocSumBytes)
}
-
- if test {
- // Set up the scavenge index via sysAlloc so the test can free it later.
- scavIndexSize := uintptr(len(scavengeIndexArray)) * unsafe.Sizeof(atomicScavChunkData{})
- p.scav.index.chunks = ((*[(1 << heapAddrBits) / pallocChunkBytes]atomicScavChunkData)(sysAlloc(scavIndexSize, p.sysStat)))[:]
- p.summaryMappedReady += scavIndexSize
- } else {
- // Set up the scavenge index.
- p.scav.index.chunks = scavengeIndexArray[:]
- }
- p.scav.index.min.Store(1) // The 0th chunk is never going to be mapped for the heap.
- p.scav.index.max.Store(uintptr(len(p.scav.index.chunks)))
}
// See mpagealloc_64bit.go for details.
@@ -127,3 +115,26 @@ func (p *pageAlloc) sysGrow(base, limit uintptr) {
}
}
}
+
+// sysInit initializes the scavengeIndex' chunks array.
+//
+// Returns the amount of memory added to sysStat.
+func (s *scavengeIndex) sysInit(test bool, sysStat *sysMemStat) (mappedReady uintptr) {
+ if test {
+ // Set up the scavenge index via sysAlloc so the test can free it later.
+ scavIndexSize := uintptr(len(scavengeIndexArray)) * unsafe.Sizeof(atomicScavChunkData{})
+ s.chunks = ((*[(1 << heapAddrBits) / pallocChunkBytes]atomicScavChunkData)(sysAlloc(scavIndexSize, sysStat)))[:]
+ mappedReady = scavIndexSize
+ } else {
+ // Set up the scavenge index.
+ s.chunks = scavengeIndexArray[:]
+ }
+ s.min.Store(1) // The 0th chunk is never going to be mapped for the heap.
+ s.max.Store(uintptr(len(s.chunks)))
+ return
+}
+
+// sysGrow is a no-op on 32-bit platforms.
+func (s *scavengeIndex) sysGrow(base, limit uintptr, sysStat *sysMemStat) uintptr {
+ return 0
+}