about summary refs log tree commit diff
path: root/src/runtime/mheap.go
diff options
context:
space:
mode:
Diffstat (limited to 'src/runtime/mheap.go')
-rw-r--r--  src/runtime/mheap.go  35
1 file changed, 14 insertions(+), 21 deletions(-)
diff --git a/src/runtime/mheap.go b/src/runtime/mheap.go
index 49d1177005..fcdd24c16e 100644
--- a/src/runtime/mheap.go
+++ b/src/runtime/mheap.go
@@ -921,7 +921,7 @@ func (h *mheap) alloc(npages uintptr, spanclass spanClass) *mspan {
//
// allocManual adds the bytes used to *stat, which should be a
// memstats in-use field. Unlike allocations in the GC'd heap, the
-// allocation does *not* count toward heap_inuse or heap_sys.
+// allocation does *not* count toward heapInUse.
//
// The memory backing the returned span may not be zeroed if
// span.needzero is set.
@@ -1279,15 +1279,12 @@ HaveSpan:
// sysUsed all the pages that are actually available
// in the span since some of them might be scavenged.
sysUsed(unsafe.Pointer(base), nbytes, scav)
- atomic.Xadd64(&memstats.heap_released, -int64(scav))
+ memstats.heapReleased.add(-int64(scav))
}
// Update stats.
+ memstats.heapFree.add(-int64(nbytes - scav))
if typ == spanAllocHeap {
- atomic.Xadd64(&memstats.heap_inuse, int64(nbytes))
- }
- if typ.manual() {
- // Manually managed memory doesn't count toward heap_sys.
- memstats.heap_sys.add(-int64(nbytes))
+ memstats.heapInUse.add(int64(nbytes))
}
// Update consistent stats.
stats := memstats.heapStats.acquire()
@@ -1359,7 +1356,8 @@ func (h *mheap) grow(npage uintptr) (uintptr, bool) {
// current arena, so we have to request the full ask.
av, asize := h.sysAlloc(ask)
if av == nil {
- print("runtime: out of memory: cannot allocate ", ask, "-byte block (", memstats.heap_sys, " in use)\n")
+ inUse := memstats.heapFree.load() + memstats.heapReleased.load() + memstats.heapInUse.load()
+ print("runtime: out of memory: cannot allocate ", ask, "-byte block (", inUse, " in use)\n")
return 0, false
}
@@ -1375,9 +1373,8 @@ func (h *mheap) grow(npage uintptr) (uintptr, bool) {
// Transition this space from Reserved to Prepared and mark it
// as released since we'll be able to start using it after updating
// the page allocator and releasing the lock at any time.
- sysMap(unsafe.Pointer(h.curArena.base), size, &memstats.heap_sys)
+ sysMap(unsafe.Pointer(h.curArena.base), size, &memstats.heapReleased)
// Update stats.
- atomic.Xadd64(&memstats.heap_released, int64(size))
stats := memstats.heapStats.acquire()
atomic.Xaddint64(&stats.released, int64(size))
memstats.heapStats.release()
@@ -1403,15 +1400,14 @@ func (h *mheap) grow(npage uintptr) (uintptr, bool) {
h.curArena.base = nBase
// Transition the space we're going to use from Reserved to Prepared.
- sysMap(unsafe.Pointer(v), nBase-v, &memstats.heap_sys)
-
- // The memory just allocated counts as both released
- // and idle, even though it's not yet backed by spans.
//
// The allocation is always aligned to the heap arena
// size which is always > physPageSize, so its safe to
- // just add directly to heap_released.
- atomic.Xadd64(&memstats.heap_released, int64(nBase-v))
+ // just add directly to heapReleased.
+ sysMap(unsafe.Pointer(v), nBase-v, &memstats.heapReleased)
+
+ // The memory just allocated counts as both released
+ // and idle, even though it's not yet backed by spans.
stats := memstats.heapStats.acquire()
atomic.Xaddint64(&stats.released, int64(nBase-v))
memstats.heapStats.release()
@@ -1488,12 +1484,9 @@ func (h *mheap) freeSpanLocked(s *mspan, typ spanAllocType) {
//
// Mirrors the code in allocSpan.
nbytes := s.npages * pageSize
+ memstats.heapFree.add(int64(nbytes))
if typ == spanAllocHeap {
- atomic.Xadd64(&memstats.heap_inuse, -int64(nbytes))
- }
- if typ.manual() {
- // Manually managed memory doesn't count toward heap_sys, so add it back.
- memstats.heap_sys.add(int64(nbytes))
+ memstats.heapInUse.add(-int64(nbytes))
}
// Update consistent stats.
stats := memstats.heapStats.acquire()