diff options
| field | value | date |
|---|---|---|
| author | Michael Anthony Knyszek <mknyszek@google.com> | 2019-09-16 21:23:24 +0000 |
| committer | Michael Knyszek <mknyszek@google.com> | 2019-11-08 18:00:54 +0000 |
| commit | a2cd2bd55d1e932b52f0b7dea45a85e058fc77d5 (patch) | |
| tree | 1d58c63c070f9e46bc68dd8c33d11cac61035a86 /src/runtime/export_test.go | |
| parent | 81640ea38dc6577bdf1b206b778b968d341c27eb (diff) | |
| download | go-a2cd2bd55d1e932b52f0b7dea45a85e058fc77d5.tar.xz | |
runtime: add per-p page allocation cache
This change adds a per-p free page cache which the page allocator may
allocate out of without a lock. The change also introduces a completely
lockless page allocator fast path.
Although the cache contains at most 64 pages (and usually less), the
vast majority (85%+) of page allocations are exactly 1 page in size.
Updates #35112.
Change-Id: I170bf0a9375873e7e3230845eb1df7e5cf741b78
Reviewed-on: https://go-review.googlesource.com/c/go/+/195701
Run-TryBot: Michael Knyszek <mknyszek@google.com>
Reviewed-by: Austin Clements <austin@google.com>
Diffstat (limited to 'src/runtime/export_test.go')
| -rw-r--r-- | src/runtime/export_test.go | 22 |

1 file changed, 22 insertions, 0 deletions
```diff
diff --git a/src/runtime/export_test.go b/src/runtime/export_test.go
index b1ebfba0d1..ea3f1c1776 100644
--- a/src/runtime/export_test.go
+++ b/src/runtime/export_test.go
@@ -7,6 +7,7 @@ package runtime
 
 import (
+	"math/bits"
 	"runtime/internal/atomic"
 	"runtime/internal/sys"
 	"unsafe"
@@ -358,6 +359,10 @@ func ReadMemStatsSlow() (base, slow MemStats) {
 		pg := mheap_.pages.chunks[i].scavenged.popcntRange(0, pallocChunkPages)
 		slow.HeapReleased += uint64(pg) * pageSize
 	}
+	for _, p := range allp {
+		pg := bits.OnesCount64(p.pcache.scav)
+		slow.HeapReleased += uint64(pg) * pageSize
+	}
 
 	// Unused space in the current arena also counts as released space.
 	slow.HeapReleased += uint64(mheap_.curArena.end - mheap_.curArena.base)
@@ -879,3 +884,20 @@ func CheckScavengedBitsCleared(mismatches []BitsMismatch) (n int, ok bool) {
 	})
 	return
 }
+
+func PageCachePagesLeaked() (leaked uintptr) {
+	stopTheWorld("PageCachePagesLeaked")
+
+	// Walk over destroyed Ps and look for unflushed caches.
+	deadp := allp[len(allp):cap(allp)]
+	for _, p := range deadp {
+		// Since we're going past len(allp) we may see nil Ps.
+		// Just ignore them.
+		if p != nil {
+			leaked += uintptr(bits.OnesCount64(p.pcache.cache))
+		}
+	}
+
+	startTheWorld()
+	return
+}
```
