diff options
| author | Michael Anthony Knyszek <mknyszek@google.com> | 2022-09-20 18:33:03 +0000 |
|---|---|---|
| committer | Michael Knyszek <mknyszek@google.com> | 2022-10-12 20:23:24 +0000 |
| commit | 4c383951b9601b488486add020ad5b7f10fb3d39 (patch) | |
| tree | 71e91c6457849aa6c6e4b2b6d6cf665590ed86e5 /src/runtime/malloc.go | |
| parent | 987f94fa038d4a66fa174ddc4267d8718a029581 (diff) | |
| download | go-4c383951b9601b488486add020ad5b7f10fb3d39.tar.xz | |
runtime: make (*mheap).sysAlloc more general
This change makes (*mheap).sysAlloc take an explicit list of hints and a
boolean as to whether or not any newly-created heapArenas should be
registered in the full arena list.
This is a refactoring in preparation for arenas.
For #51317.
Change-Id: I0584a033fce3fcb60c5d0bc033d5fb8bd23b2378
Reviewed-on: https://go-review.googlesource.com/c/go/+/432078
TryBot-Result: Gopher Robot <gobot@golang.org>
Reviewed-by: Cherry Mui <cherryyz@google.com>
Run-TryBot: Michael Knyszek <mknyszek@google.com>
Diffstat (limited to 'src/runtime/malloc.go')
| -rw-r--r-- | src/runtime/malloc.go | 69 |
1 file changed, 41 insertions, 28 deletions
diff --git a/src/runtime/malloc.go b/src/runtime/malloc.go index c18ed07d49..d651cbc14e 100644 --- a/src/runtime/malloc.go +++ b/src/runtime/malloc.go @@ -556,26 +556,37 @@ func mallocinit() { // heapArenaBytes. sysAlloc returns nil on failure. // There is no corresponding free function. // +// hintList is a list of hint addresses for where to allocate new +// heap arenas. It must be non-nil. +// +// register indicates whether the heap arena should be registered +// in allArenas. +// // sysAlloc returns a memory region in the Reserved state. This region must // be transitioned to Prepared and then Ready before use. // // h must be locked. -func (h *mheap) sysAlloc(n uintptr) (v unsafe.Pointer, size uintptr) { +func (h *mheap) sysAlloc(n uintptr, hintList **arenaHint, register bool) (v unsafe.Pointer, size uintptr) { assertLockHeld(&h.lock) n = alignUp(n, heapArenaBytes) - // First, try the arena pre-reservation. - // Newly-used mappings are considered released. - v = h.arena.alloc(n, heapArenaBytes, &gcController.heapReleased) - if v != nil { - size = n - goto mapped + if hintList == &h.arenaHints { + // First, try the arena pre-reservation. + // Newly-used mappings are considered released. + // + // Only do this if we're using the regular heap arena hints. + // This behavior is only for the heap. + v = h.arena.alloc(n, heapArenaBytes, &gcController.heapReleased) + if v != nil { + size = n + goto mapped + } } // Try to grow the heap at a hint address. - for h.arenaHints != nil { - hint := h.arenaHints + for *hintList != nil { + hint := *hintList p := hint.addr if hint.down { p -= n @@ -607,7 +618,7 @@ func (h *mheap) sysAlloc(n uintptr) (v unsafe.Pointer, size uintptr) { if v != nil { sysFreeOS(v, n) } - h.arenaHints = hint.next + *hintList = hint.next h.arenaHintAlloc.free(unsafe.Pointer(hint)) } @@ -692,26 +703,28 @@ mapped: } } - // Add the arena to the arenas list. 
- if len(h.allArenas) == cap(h.allArenas) { - size := 2 * uintptr(cap(h.allArenas)) * goarch.PtrSize - if size == 0 { - size = physPageSize - } - newArray := (*notInHeap)(persistentalloc(size, goarch.PtrSize, &memstats.gcMiscSys)) - if newArray == nil { - throw("out of memory allocating allArenas") + // Register the arena in allArenas if requested. + if register { + if len(h.allArenas) == cap(h.allArenas) { + size := 2 * uintptr(cap(h.allArenas)) * goarch.PtrSize + if size == 0 { + size = physPageSize + } + newArray := (*notInHeap)(persistentalloc(size, goarch.PtrSize, &memstats.gcMiscSys)) + if newArray == nil { + throw("out of memory allocating allArenas") + } + oldSlice := h.allArenas + *(*notInHeapSlice)(unsafe.Pointer(&h.allArenas)) = notInHeapSlice{newArray, len(h.allArenas), int(size / goarch.PtrSize)} + copy(h.allArenas, oldSlice) + // Do not free the old backing array because + // there may be concurrent readers. Since we + // double the array each time, this can lead + // to at most 2x waste. } - oldSlice := h.allArenas - *(*notInHeapSlice)(unsafe.Pointer(&h.allArenas)) = notInHeapSlice{newArray, len(h.allArenas), int(size / goarch.PtrSize)} - copy(h.allArenas, oldSlice) - // Do not free the old backing array because - // there may be concurrent readers. Since we - // double the array each time, this can lead - // to at most 2x waste. + h.allArenas = h.allArenas[:len(h.allArenas)+1] + h.allArenas[len(h.allArenas)-1] = ri } - h.allArenas = h.allArenas[:len(h.allArenas)+1] - h.allArenas[len(h.allArenas)-1] = ri // Store atomically just in case an object from the // new heap arena becomes visible before the heap lock |
