diff options
| author | Michael Anthony Knyszek <mknyszek@google.com> | 2022-05-03 19:28:25 +0000 |
|---|---|---|
| committer | Gopher Robot <gobot@golang.org> | 2022-05-03 19:58:15 +0000 |
| commit | 7c404d59db3591a7c5854b38dc0f05fcb7ac0cff (patch) | |
| tree | c53ce941c2e4672bffa7116530cea819260dca7f /src/runtime/mcache.go | |
| parent | bccce9028996502e62a92255d79d5e003fbae63b (diff) | |
| download | go-7c404d59db3591a7c5854b38dc0f05fcb7ac0cff.tar.xz | |
runtime: store consistent total allocation stats as uint64
Currently the consistent total allocation stats are managed as uintptrs,
which means they can easily overflow on 32-bit systems. Fix this by
storing these stats as uint64s. This will cause some minor performance
degradation on 32-bit systems, but there really isn't a way around this,
and it affects the correctness of the metrics we export.
Fixes #52680.
Change-Id: I7e6ca44047d46b4bd91c6f87c2d29f730e0d6191
Reviewed-on: https://go-review.googlesource.com/c/go/+/403758
Run-TryBot: Michael Knyszek <mknyszek@google.com>
TryBot-Result: Gopher Robot <gobot@golang.org>
Auto-Submit: Michael Knyszek <mknyszek@google.com>
Reviewed-by: Austin Clements <austin@google.com>
Diffstat (limited to 'src/runtime/mcache.go')
| -rw-r--r-- | src/runtime/mcache.go | 20 |
1 file changed, 10 insertions, 10 deletions
diff --git a/src/runtime/mcache.go b/src/runtime/mcache.go
index 5a74431ff4..7c785900db 100644
--- a/src/runtime/mcache.go
+++ b/src/runtime/mcache.go
@@ -159,18 +159,18 @@ func (c *mcache) refill(spc spanClass) {
 
 	// Count up how many slots were used and record it.
 	stats := memstats.heapStats.acquire()
-	slotsUsed := uintptr(s.allocCount) - uintptr(s.allocCountBeforeCache)
-	atomic.Xadduintptr(&stats.smallAllocCount[spc.sizeclass()], slotsUsed)
+	slotsUsed := int64(s.allocCount) - int64(s.allocCountBeforeCache)
+	atomic.Xadd64(&stats.smallAllocCount[spc.sizeclass()], slotsUsed)
 
 	// Flush tinyAllocs.
 	if spc == tinySpanClass {
-		atomic.Xadduintptr(&stats.tinyAllocCount, c.tinyAllocs)
+		atomic.Xadd64(&stats.tinyAllocCount, int64(c.tinyAllocs))
 		c.tinyAllocs = 0
 	}
 	memstats.heapStats.release()
 
 	// Count the allocs in inconsistent, internal stats.
-	bytesAllocated := int64(slotsUsed * s.elemsize)
+	bytesAllocated := slotsUsed * int64(s.elemsize)
 	gcController.totalAlloc.Add(bytesAllocated)
 
 	// Update heapLive and flush scanAlloc.
@@ -224,8 +224,8 @@ func (c *mcache) allocLarge(size uintptr, noscan bool) *mspan {
 
 	// Count the alloc in consistent, external stats.
 	stats := memstats.heapStats.acquire()
-	atomic.Xadduintptr(&stats.largeAlloc, npages*pageSize)
-	atomic.Xadduintptr(&stats.largeAllocCount, 1)
+	atomic.Xadd64(&stats.largeAlloc, int64(npages*pageSize))
+	atomic.Xadd64(&stats.largeAllocCount, 1)
 	memstats.heapStats.release()
 
 	// Count the alloc in inconsistent, internal stats.
@@ -250,17 +250,17 @@ func (c *mcache) releaseAll() {
 	for i := range c.alloc {
 		s := c.alloc[i]
 		if s != &emptymspan {
-			slotsUsed := uintptr(s.allocCount) - uintptr(s.allocCountBeforeCache)
+			slotsUsed := int64(s.allocCount) - int64(s.allocCountBeforeCache)
 			s.allocCountBeforeCache = 0
 
 			// Adjust smallAllocCount for whatever was allocated.
 			stats := memstats.heapStats.acquire()
-			atomic.Xadduintptr(&stats.smallAllocCount[spanClass(i).sizeclass()], slotsUsed)
+			atomic.Xadd64(&stats.smallAllocCount[spanClass(i).sizeclass()], slotsUsed)
 			memstats.heapStats.release()
 
 			// Adjust the actual allocs in inconsistent, internal stats.
 			// We assumed earlier that the full span gets allocated.
-			gcController.totalAlloc.Add(int64(slotsUsed * s.elemsize))
+			gcController.totalAlloc.Add(slotsUsed * int64(s.elemsize))
 
 			// Release the span to the mcentral.
 			mheap_.central[i].mcentral.uncacheSpan(s)
@@ -273,7 +273,7 @@ func (c *mcache) releaseAll() {
 
 	// Flush tinyAllocs.
 	stats := memstats.heapStats.acquire()
-	atomic.Xadduintptr(&stats.tinyAllocCount, c.tinyAllocs)
+	atomic.Xadd64(&stats.tinyAllocCount, int64(c.tinyAllocs))
 	c.tinyAllocs = 0
 	memstats.heapStats.release()
