aboutsummaryrefslogtreecommitdiff
path: root/src/runtime/mgcsweep.go
diff options
context:
space:
mode:
author: Cherry Mui <cherryyz@google.com> 2022-11-16 17:32:08 -0500
committer: Cherry Mui <cherryyz@google.com> 2023-10-02 20:39:21 +0000
commit340a4f55c4afac5b74c8df8365decb0c0237d710 (patch)
tree6c323e9e445ec5693c9c58e102dbe677e2735c6a /src/runtime/mgcsweep.go
parent32b6d2d9a849a0f0120e9139b403831669373b79 (diff)
downloadgo-340a4f55c4afac5b74c8df8365decb0c0237d710.tar.xz
runtime: use smaller fields for mspan.freeindex and nelems
mspan.freeindex and nelems can fit into uint16 for all possible values. Use uint16 instead of uintptr.

Change-Id: Ifce20751e81d5022be1f6b5cbb5fbe4fd1728b1b
Reviewed-on: https://go-review.googlesource.com/c/go/+/451359
Reviewed-by: Michael Knyszek <mknyszek@google.com>
Reviewed-by: Matthew Dempsky <mdempsky@google.com>
LUCI-TryBot-Result: Go LUCI <golang-scoped@luci-project-accounts.iam.gserviceaccount.com>
Diffstat (limited to 'src/runtime/mgcsweep.go')
-rw-r--r-- src/runtime/mgcsweep.go | 16
1 file changed, 8 insertions, 8 deletions
diff --git a/src/runtime/mgcsweep.go b/src/runtime/mgcsweep.go
index 68f1aae600..986eb573ca 100644
--- a/src/runtime/mgcsweep.go
+++ b/src/runtime/mgcsweep.go
@@ -602,8 +602,8 @@ func (sl *sweepLocked) sweep(preserve bool) bool {
// efficient; allocfreetrace has massive overhead.
mbits := s.markBitsForBase()
abits := s.allocBitsForIndex(0)
- for i := uintptr(0); i < s.nelems; i++ {
- if !mbits.isMarked() && (abits.index < s.freeindex || abits.isMarked()) {
+ for i := uintptr(0); i < uintptr(s.nelems); i++ {
+ if !mbits.isMarked() && (abits.index < uintptr(s.freeindex) || abits.isMarked()) {
x := s.base() + i*s.elemsize
if debug.allocfreetrace != 0 {
tracefree(unsafe.Pointer(x), size)
@@ -634,12 +634,12 @@ func (sl *sweepLocked) sweep(preserve bool) bool {
//
// Check the first bitmap byte, where we have to be
// careful with freeindex.
- obj := s.freeindex
+ obj := uintptr(s.freeindex)
if (*s.gcmarkBits.bytep(obj / 8)&^*s.allocBits.bytep(obj / 8))>>(obj%8) != 0 {
s.reportZombies()
}
// Check remaining bytes.
- for i := obj/8 + 1; i < divRoundUp(s.nelems, 8); i++ {
+ for i := obj/8 + 1; i < divRoundUp(uintptr(s.nelems), 8); i++ {
if *s.gcmarkBits.bytep(i)&^*s.allocBits.bytep(i) != 0 {
s.reportZombies()
}
@@ -666,7 +666,7 @@ func (sl *sweepLocked) sweep(preserve bool) bool {
// gcmarkBits becomes the allocBits.
// get a fresh cleared gcmarkBits in preparation for next GC
s.allocBits = s.gcmarkBits
- s.gcmarkBits = newMarkBits(s.nelems)
+ s.gcmarkBits = newMarkBits(uintptr(s.nelems))
// refresh pinnerBits if they exists
if s.pinnerBits != nil {
@@ -760,7 +760,7 @@ func (sl *sweepLocked) sweep(preserve bool) bool {
return true
}
// Return span back to the right mcentral list.
- if uintptr(nalloc) == s.nelems {
+ if nalloc == s.nelems {
mheap_.central[spc].mcentral.fullSwept(sweepgen).push(s)
} else {
mheap_.central[spc].mcentral.partialSwept(sweepgen).push(s)
@@ -829,10 +829,10 @@ func (s *mspan) reportZombies() {
print("runtime: marked free object in span ", s, ", elemsize=", s.elemsize, " freeindex=", s.freeindex, " (bad use of unsafe.Pointer? try -d=checkptr)\n")
mbits := s.markBitsForBase()
abits := s.allocBitsForIndex(0)
- for i := uintptr(0); i < s.nelems; i++ {
+ for i := uintptr(0); i < uintptr(s.nelems); i++ {
addr := s.base() + i*s.elemsize
print(hex(addr))
- alloc := i < s.freeindex || abits.isMarked()
+ alloc := i < uintptr(s.freeindex) || abits.isMarked()
if alloc {
print(" alloc")
} else {