path: root/src/runtime/mcache.go
Diffstat (limited to 'src/runtime/mcache.go')
-rw-r--r--  src/runtime/mcache.go  52
1 file changed, 51 insertions(+), 1 deletion(-)
diff --git a/src/runtime/mcache.go b/src/runtime/mcache.go
index cade81031d..82872f1454 100644
--- a/src/runtime/mcache.go
+++ b/src/runtime/mcache.go
@@ -44,7 +44,17 @@ type mcache struct {
// The rest is not accessed on every malloc.
- alloc [numSpanClasses]*mspan // spans to allocate from, indexed by spanClass
+ // alloc contains spans to allocate from, indexed by spanClass.
+ alloc [numSpanClasses]*mspan
+
+ // TODO(thepudds): it would be better to interleave alloc and reusableScan/reusableNoscan so that
+ // a single malloc call can often access both in the same cache line for a given spanClass.
+ // They are not interleaved right now in part to keep this diff slightly smaller, and the
+ // effect on current microbenchmarks might be negligible.
+
+ // reusableNoscan contains linked lists of reusable noscan heap objects, indexed by spanClass.
+ // The next pointers are stored in the first word of the heap objects.
+ reusableNoscan [numSpanClasses]gclinkptr
stackcache [_NumStackOrders]stackfreelist
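
For illustration, here is a minimal standalone sketch of the intrusive free-list layout that reusableNoscan relies on. This is hypothetical example code, not part of the change: linkptr stands in for the runtime's gclinkptr, and the objects are ordinary local arrays. The key point is that each free object's first word doubles as the next pointer, so each per-spanClass list costs only its head pointer.

package main

import (
	"fmt"
	"unsafe"
)

// linkptr mimics the runtime's gclinkptr: a uintptr whose target's
// first word holds the next link in the list.
type linkptr uintptr

// next returns the first word of the object, reinterpreted as the
// location of the next link.
func (l linkptr) next() *linkptr {
	return (*linkptr)(unsafe.Pointer(l))
}

func main() {
	// Stand-ins for two freed 16-byte noscan objects.
	var a, b [16]byte

	var head linkptr // analogous to c.reusableNoscan[spc]

	// Push both objects, mirroring addReusableNoscan in this diff.
	for _, p := range []unsafe.Pointer{unsafe.Pointer(&a), unsafe.Pointer(&b)} {
		v := linkptr(uintptr(p))
		*v.next() = head
		head = v
	}

	// Pop them back off in LIFO order.
	for head != 0 {
		fmt.Printf("reusable object at %#x\n", uintptr(head))
		head = *head.next()
	}
}
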
@@ -96,6 +106,7 @@ func allocmcache() *mcache {
c.alloc[i] = &emptymspan
}
c.nextSample = nextSample()
+
return c
}
@@ -153,6 +164,16 @@ func (c *mcache) refill(spc spanClass) {
if s.allocCount != s.nelems {
throw("refill of span with free space remaining")
}
+
+ // TODO(thepudds): we might be able to allow mallocgcTiny to reuse 16-byte objects from spc==5,
+ // but for now, just clear our reusable objects for tinySpanClass.
+ if spc == tinySpanClass {
+ c.reusableNoscan[spc] = 0
+ }
+ if c.reusableNoscan[spc] != 0 {
+ throw("refill of span with reusable pointers remaining on pointer free list")
+ }
+
if s != &emptymspan {
// Mark this span as no longer cached.
if s.sweepgen != mheap_.sweepgen+3 {
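
To make the invariant explicit, here is a hedged restatement as a hypothetical standalone helper (not part of this change; it reuses the linkptr type from the sketch above). Every object on a reusable list lives in the span being replaced, so refill must either drop the list deliberately (tinySpanClass) or find it already empty:

// dropOrCheckReusable restates the refill checks above: the tiny-class
// list is dropped wholesale because mallocgcTiny bump-allocates within
// 16-byte blocks, while any other class with reusable objects left would
// hand out pointers into a span that is about to be released.
func dropOrCheckReusable(head *linkptr, tiny bool) {
	if tiny {
		*head = 0 // drop: the tiny alloc path doesn't consume this list
		return
	}
	if *head != 0 {
		panic("refill of span with reusable pointers remaining")
	}
}
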
@@ -312,6 +333,13 @@ func (c *mcache) releaseAll() {
c.tinyAllocs = 0
memstats.heapStats.release()
+ // Clear the reusable linked lists.
+ // For noscan objects, the nodes of the linked lists are the reusable heap objects themselves,
+ // so we can simply clear the linked list head pointers.
+ // TODO(thepudds): consider adding debug logging when a non-empty reusable list gets cleared,
+ // maybe based on the existing debugReusableLog.
+ clear(c.reusableNoscan[:])
+
// Update heapLive and heapScan.
gcController.update(dHeapLive, scanAlloc)
}
@@ -339,3 +367,25 @@ func (c *mcache) prepareForSweep() {
stackcache_clear(c)
c.flushGen.Store(mheap_.sweepgen) // Synchronizes with gcStart
}
+
+// addReusableNoscan adds a noscan object pointer to the reusable pointer free list
+// for a span class.
+func (c *mcache) addReusableNoscan(spc spanClass, ptr uintptr) {
+ if !runtimeFreegcEnabled {
+ return
+ }
+
+ // Add to the reusable pointers free list.
+ v := gclinkptr(ptr)
+ v.ptr().next = c.reusableNoscan[spc]
+ c.reusableNoscan[spc] = v
+}
+
+// hasReusableNoscan reports whether a reusable object is available for
+// the noscan span class spc.
+func (c *mcache) hasReusableNoscan(spc spanClass) bool {
+ if !runtimeFreegcEnabled {
+ return false
+ }
+ return c.reusableNoscan[spc] != 0
+}
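
To round out the picture, here is a hedged sketch of the pop side these helpers imply. takeReusableNoscan is hypothetical (the malloc-path consumer is outside this file), but it follows the same gclinkptr conventions as addReusableNoscan above:

// takeReusableNoscan is a hypothetical counterpart to addReusableNoscan,
// sketching how a malloc path could pop a reusable object: return the
// head of the list and advance it via the next pointer stored in the
// object's first word. A zero result means the caller should fall back
// to the normal allocation path.
func (c *mcache) takeReusableNoscan(spc spanClass) uintptr {
	if !runtimeFreegcEnabled {
		return 0
	}
	v := c.reusableNoscan[spc]
	if v == 0 {
		return 0
	}
	c.reusableNoscan[spc] = v.ptr().next
	return uintptr(v)
}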