aboutsummaryrefslogtreecommitdiff
path: root/src/runtime/mcentral.go
diff options
context:
space:
mode:
authorRick Hudson <rlh@golang.org>2016-02-11 13:57:58 -0500
committerRick Hudson <rlh@golang.org>2016-04-27 21:54:47 +0000
commit3479b065d43f2990ac12e7b00ddff6f63a876ca9 (patch)
tree491ad3b3ea77e4e4a9202ebaaa933296206007da /src/runtime/mcentral.go
parentdc65a82eff0a3af5a26f6c6d31c53bdac9b31168 (diff)
downloadgo-3479b065d43f2990ac12e7b00ddff6f63a876ca9.tar.xz
[dev.garbage] runtime: allocate directly from GC mark bits
Instead of building a freelist from the mark bits generated by the GC this CL allocates directly from the mark bits. The approach moves the mark bits from the pointer/no pointer heap structures into their own per span data structures. The mark/allocation vectors consist of a single mark bit per object. Two vectors are maintained, one for allocation and one for the GC's mark phase. During the GC cycle's sweep phase the interpretation of the vectors is swapped. The mark vector becomes the allocation vector and the old allocation vector is cleared and becomes the mark vector that the next GC cycle will use. Marked entries in the allocation vector indicate that the object is not free. Each allocation vector maintains a boundary between areas of the span already allocated from and areas not yet allocated from. As objects are allocated this boundary is moved until it reaches the end of the span. At this point further allocations will be done from another span. Since we no longer sweep a span inspecting each freed object the responsibility for maintaining pointer/scalar bits in the heapBitMap containing is now the responsibility of the the routines doing the actual allocation. This CL is functionally complete and ready for performance tuning. Change-Id: I336e0fc21eef1066e0b68c7067cc71b9f3d50e04 Reviewed-on: https://go-review.googlesource.com/19470 Reviewed-by: Austin Clements <austin@google.com>
Diffstat (limited to 'src/runtime/mcentral.go')
-rw-r--r--src/runtime/mcentral.go35
1 file changed, 8 insertions(+), 27 deletions(-)
diff --git a/src/runtime/mcentral.go b/src/runtime/mcentral.go
index baca157db9..47d3ae2f81 100644
--- a/src/runtime/mcentral.go
+++ b/src/runtime/mcentral.go
@@ -18,7 +18,7 @@ import "runtime/internal/atomic"
type mcentral struct {
lock mutex
sizeclass int32
- nonempty mSpanList // list of spans with a free object
+ nonempty mSpanList // list of spans with a free object, ie a nonempty free list
empty mSpanList // list of spans with no free objects (or cached in an mcache)
}
@@ -67,7 +67,9 @@ retry:
c.empty.insertBack(s)
unlock(&c.lock)
s.sweep(true)
- if s.freelist.ptr() != nil {
+ freeIndex := s.nextFreeIndex(0)
+ if freeIndex != s.nelems {
+ s.freeindex = freeIndex
goto havespan
}
lock(&c.lock)
@@ -115,9 +117,6 @@ havespan:
// heap_live changed.
gcController.revise()
}
- if s.freelist.ptr() == nil {
- throw("freelist empty")
- }
s.incache = true
return s
}
@@ -150,15 +149,11 @@ func (c *mcentral) uncacheSpan(s *mspan) {
// the latest generation.
// If preserve=true, don't return the span to heap nor relink in MCentral lists;
// caller takes care of it.
-func (c *mcentral) freeSpan(s *mspan, n int32, start gclinkptr, end gclinkptr, preserve bool) bool {
+func (c *mcentral) freeSpan(s *mspan, n int32, start gclinkptr, end gclinkptr, preserve bool, wasempty bool) bool {
if s.incache {
- throw("freespan into cached span")
+ throw("freeSpan given cached span")
}
- // Add the objects back to s's free list.
- wasempty := s.freelist.ptr() == nil
- end.ptr().next = s.freelist
- s.freelist = start
s.ref -= uint16(n)
if preserve {
@@ -190,16 +185,14 @@ func (c *mcentral) freeSpan(s *mspan, n int32, start gclinkptr, end gclinkptr, p
return false
}
- // s is completely freed, return it to the heap.
c.nonempty.remove(s)
s.needzero = 1
- s.freelist = 0
unlock(&c.lock)
mheap_.freeSpan(s, 0)
return true
}
-// Fetch a new span from the heap and carve into objects for the free list.
+// grow allocates a new empty span from the heap and initializes it for c's size class.
func (c *mcentral) grow() *mspan {
npages := uintptr(class_to_allocnpages[c.sizeclass])
size := uintptr(class_to_size[c.sizeclass])
@@ -212,19 +205,7 @@ func (c *mcentral) grow() *mspan {
p := uintptr(s.start << _PageShift)
s.limit = p + size*n
- head := gclinkptr(p)
- tail := gclinkptr(p)
- // i==0 iteration already done
- for i := uintptr(1); i < n; i++ {
- p += size
- tail.ptr().next = gclinkptr(p)
- tail = gclinkptr(p)
- }
- if s.freelist.ptr() != nil {
- throw("freelist not empty")
- }
- tail.ptr().next = 0
- s.freelist = head
+
heapBitsForSpan(s.base()).initSpan(s)
return s
}