| author    | Austin Clements <austin@google.com>                | 2018-09-26 14:20:58 -0400 |
|-----------|----------------------------------------------------|---------------------------|
| committer | Austin Clements <austin@google.com>                | 2018-11-15 19:14:10 +0000 |
| commit    | e500ffd88cb906014320607c6a03a5fd05ee84cf (patch)   |                           |
| tree      | f88b8f4372037b1e89608faaf91a6e8eab2a6453 /src      |                           |
| parent    | b2df0bd5f53e599fef9fcb808304dd0e5ea3ba0b (diff)    |                           |
| download  | go-e500ffd88cb906014320607c6a03a5fd05ee84cf.tar.xz |                           |
runtime: track all heap arenas in a slice
Currently, there's no efficient way to iterate over the Go heap. We're
going to need this for fast free page sweeping, so this CL adds a
slice of all allocated heap arenas. This will also be useful for
generational GC.
For #18155.
Change-Id: I58d126cfb9c3f61b3125d80b74ccb1b2169efbcc
Reviewed-on: https://go-review.googlesource.com/c/138076
Run-TryBot: Austin Clements <austin@google.com>
TryBot-Result: Gobot Gobot <gobot@golang.org>
Reviewed-by: Rick Hudson <rlh@golang.org>
Diffstat (limited to 'src')
| -rw-r--r-- | src/runtime/malloc.go | 21 |
| -rw-r--r-- | src/runtime/mheap.go  | 11 |
2 files changed, 31 insertions, 1 deletions
diff --git a/src/runtime/malloc.go b/src/runtime/malloc.go
index 12fa744052..e827dbae93 100644
--- a/src/runtime/malloc.go
+++ b/src/runtime/malloc.go
@@ -641,6 +641,27 @@ mapped:
 			}
 		}
 
+		// Add the arena to the arenas list.
+		if len(h.allArenas) == cap(h.allArenas) {
+			size := 2 * uintptr(cap(h.allArenas)) * sys.PtrSize
+			if size == 0 {
+				size = physPageSize
+			}
+			newArray := (*notInHeap)(persistentalloc(size, sys.PtrSize, &memstats.gc_sys))
+			if newArray == nil {
+				throw("out of memory allocating allArenas")
+			}
+			oldSlice := h.allArenas
+			*(*notInHeapSlice)(unsafe.Pointer(&h.allArenas)) = notInHeapSlice{newArray, len(h.allArenas), int(size / sys.PtrSize)}
+			copy(h.allArenas, oldSlice)
+			// Do not free the old backing array because
+			// there may be concurrent readers. Since we
+			// double the array each time, this can lead
+			// to at most 2x waste.
+		}
+		h.allArenas = h.allArenas[:len(h.allArenas)+1]
+		h.allArenas[len(h.allArenas)-1] = ri
+
 		// Store atomically just in case an object from the
 		// new heap arena becomes visible before the heap lock
 		// is released (which shouldn't happen, but there's
diff --git a/src/runtime/mheap.go b/src/runtime/mheap.go
index 97a0448ad3..c8b2b6524f 100644
--- a/src/runtime/mheap.go
+++ b/src/runtime/mheap.go
@@ -133,7 +133,16 @@ type mheap struct {
 	// (the actual arenas). This is only used on 32-bit.
 	arena linearAlloc
 
-	//_ uint32 // ensure 64-bit alignment of central
+	// allArenas is the arenaIndex of every mapped arena. This can
+	// be used to iterate through the address space.
+	//
+	// Access is protected by mheap_.lock. However, since this is
+	// append-only and old backing arrays are never freed, it is
+	// safe to acquire mheap_.lock, copy the slice header, and
+	// then release mheap_.lock.
+	allArenas []arenaIdx
+
+	_ uint32 // ensure 64-bit alignment of central
 
 	// central free lists for small size classes.
 	// the padding makes sure that the mcentrals are
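
The core of the change is the growth discipline on h.allArenas: the slice is append-only, its capacity is doubled via persistentalloc when full, and old backing arrays are deliberately never freed, so a reader that copies the slice header while holding mheap_.lock can keep iterating its snapshot after releasing the lock. Below is a minimal, self-contained sketch of the same pattern in ordinary Go, outside the runtime; the arenaSet type and its add/snapshot methods are illustrative names only (not part of this CL), and sync.Mutex plus GC-managed slices stand in for mheap_.lock and persistentalloc.

```go
package main

import (
	"fmt"
	"sync"
)

// arenaSet mimics the allArenas pattern: an append-only slice whose
// backing array grows only by doubling and whose old backing arrays
// are never reused or shrunk. In the runtime the array comes from
// persistentalloc and is literally never freed; here the garbage
// collector keeps an old array alive as long as a reader still holds
// a snapshot of it.
type arenaSet struct {
	mu  sync.Mutex // stands in for mheap_.lock
	all []uint64   // stands in for []arenaIdx
}

// add appends under the lock, doubling the capacity when full so that
// abandoned backing arrays waste at most 2x the live array's size.
func (s *arenaSet) add(idx uint64) {
	s.mu.Lock()
	defer s.mu.Unlock()
	if len(s.all) == cap(s.all) {
		newCap := 2 * cap(s.all)
		if newCap == 0 {
			newCap = 16
		}
		grown := make([]uint64, len(s.all), newCap)
		copy(grown, s.all)
		s.all = grown // the old backing array is abandoned, not recycled
	}
	s.all = append(s.all, idx) // cannot reallocate: len < cap here
}

// snapshot copies only the slice header under the lock. Because the
// set is append-only and existing elements are never rewritten, the
// caller can iterate the returned snapshot without holding the lock.
func (s *arenaSet) snapshot() []uint64 {
	s.mu.Lock()
	defer s.mu.Unlock()
	return s.all
}

func main() {
	var s arenaSet
	for i := uint64(0); i < 100; i++ {
		s.add(i)
	}
	snap := s.snapshot()
	var sum uint64
	for _, idx := range snap { // lock-free iteration over the snapshot
		sum += idx
	}
	fmt.Println(len(snap), sum) // 100 4950
}
```

As in the runtime code above, doubling on growth bounds the memory left behind in abandoned backing arrays to roughly the size of the live array, which is the "at most 2x waste" noted in the malloc.go comment.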
