diff options
| author | Michael Anthony Knyszek <mknyszek@google.com> | 2019-08-21 00:24:25 +0000 |
|---|---|---|
| committer | Michael Knyszek <mknyszek@google.com> | 2019-11-07 19:14:27 +0000 |
| commit | 73317080e12234defb59f84e2b5b15f69650b5d5 (patch) | |
| tree | 299810e966e35857625c1323fe0da5d3c6cd4c88 /src/runtime/mpagealloc_test.go | |
| parent | 39e8cb0faac7785f89b21246a45e8cf8d5bc7d95 (diff) | |
| download | go-73317080e12234defb59f84e2b5b15f69650b5d5.tar.xz | |
runtime: add scavenging code for new page allocator
This change adds a scavenger for the new page allocator along with
tests. The scavenger walks over the heap backwards once per GC, looking
for memory to scavenge. It walks across the heap without any lock held,
searching optimistically. If it finds what appears to be a scavenging
candidate it acquires the heap lock and attempts to verify it. Upon
verification it then scavenges.
Notably, unlike the old scavenger, it doesn't show any preference for
huge pages and instead follows a more strict last-page-first policy.
Updates #35112.
Change-Id: I0621ef73c999a471843eab2d1307ae5679dd18d6
Reviewed-on: https://go-review.googlesource.com/c/go/+/195697
Reviewed-by: Keith Randall <khr@golang.org>
Reviewed-by: Austin Clements <austin@google.com>
Diffstat (limited to 'src/runtime/mpagealloc_test.go')
| -rw-r--r-- | src/runtime/mpagealloc_test.go | 25 |
1 file changed, 15 insertions, 10 deletions
diff --git a/src/runtime/mpagealloc_test.go b/src/runtime/mpagealloc_test.go index 8d304a0ced..f7380a7241 100644 --- a/src/runtime/mpagealloc_test.go +++ b/src/runtime/mpagealloc_test.go @@ -23,8 +23,12 @@ func checkPageAlloc(t *testing.T, want, got *PageAlloc) { for i := gotStart; i < gotEnd; i++ { // Check the bitmaps. - if !checkPallocBits(t, got.PallocBits(i), want.PallocBits(i)) { - t.Logf("in chunk %d", i) + gb, wb := got.PallocData(i), want.PallocData(i) + if !checkPallocBits(t, gb.PallocBits(), wb.PallocBits()) { + t.Logf("in chunk %d (mallocBits)", i) + } + if !checkPallocBits(t, gb.Scavenged(), wb.Scavenged()) { + t.Logf("in chunk %d (scavenged)", i) } } // TODO(mknyszek): Verify summaries too? @@ -310,7 +314,7 @@ func TestPageAllocAlloc(t *testing.T) { for name, v := range tests { v := v t.Run(name, func(t *testing.T) { - b := NewPageAlloc(v.before) + b := NewPageAlloc(v.before, nil) defer FreePageAlloc(b) for iter, i := range v.hits { @@ -318,7 +322,7 @@ func TestPageAllocAlloc(t *testing.T) { t.Fatalf("bad alloc #%d: want 0x%x, got 0x%x", iter+1, i.base, a) } } - want := NewPageAlloc(v.after) + want := NewPageAlloc(v.after, nil) defer FreePageAlloc(want) checkPageAlloc(t, want, b) @@ -335,7 +339,7 @@ func TestPageAllocExhaust(t *testing.T) { for i := ChunkIdx(0); i < 4; i++ { bDesc[BaseChunkIdx+i] = []BitRange{} } - b := NewPageAlloc(bDesc) + b := NewPageAlloc(bDesc, nil) defer FreePageAlloc(b) // Allocate into b with npages until we've exhausted the heap. @@ -366,7 +370,7 @@ func TestPageAllocExhaust(t *testing.T) { wantDesc[BaseChunkIdx+i] = []BitRange{} } } - want := NewPageAlloc(wantDesc) + want := NewPageAlloc(wantDesc, nil) defer FreePageAlloc(want) // Check to make sure the heap b matches what we want. 
@@ -590,14 +594,15 @@ func TestPageAllocFree(t *testing.T) { for name, v := range tests { v := v t.Run(name, func(t *testing.T) { - b := NewPageAlloc(v.before) + b := NewPageAlloc(v.before, nil) defer FreePageAlloc(b) + for _, addr := range v.frees { b.Free(addr, v.npages) } - - want := NewPageAlloc(v.after) + want := NewPageAlloc(v.after, nil) defer FreePageAlloc(want) + checkPageAlloc(t, want, b) }) } @@ -641,7 +646,7 @@ func TestPageAllocAllocAndFree(t *testing.T) { for name, v := range tests { v := v t.Run(name, func(t *testing.T) { - b := NewPageAlloc(v.init) + b := NewPageAlloc(v.init, nil) defer FreePageAlloc(b) for iter, i := range v.hits { |
