about summary refs log tree commit diff
path: root/src/runtime
diff options
context:
space:
mode:
authorJes Cok <xigua67damn@gmail.com>2025-07-28 11:36:17 +0000
committerGopher Robot <gobot@golang.org>2025-07-28 11:13:58 -0700
commite151db3e065eea8a13fd2bc83aafb6959edd6fca (patch)
tree5cddf59378c70f7589e724c7493c404903afe9b8 /src/runtime
parent4569255f8ce8ee744e34e44465444d6d58d349de (diff)
downloadgo-e151db3e065eea8a13fd2bc83aafb6959edd6fca.tar.xz
all: omit unnecessary type conversions
Found by github.com/mdempsky/unconvert

Change-Id: Ib78cceb718146509d96dbb6da87b27dbaeba1306
GitHub-Last-Rev: dedf354811701ce8920c305b6f7aa78914a4171c
GitHub-Pull-Request: golang/go#74771
Reviewed-on: https://go-review.googlesource.com/c/go/+/690735
Reviewed-by: Mark Freeman <mark@golang.org>
LUCI-TryBot-Result: Go LUCI <golang-scoped@luci-project-accounts.iam.gserviceaccount.com>
Reviewed-by: Keith Randall <khr@google.com>
Auto-Submit: Keith Randall <khr@golang.org>
Reviewed-by: Keith Randall <khr@golang.org>
Diffstat (limited to 'src/runtime')
-rw-r--r--src/runtime/heapdump.go4
-rw-r--r--src/runtime/mcleanup.go4
-rw-r--r--src/runtime/mgcmark.go2
-rw-r--r--src/runtime/mgcsweep.go12
-rw-r--r--src/runtime/mheap.go16
-rw-r--r--src/runtime/slice.go2
-rw-r--r--src/runtime/traceallocfree.go2
-rw-r--r--src/runtime/tracebuf.go4
-rw-r--r--src/runtime/tracecpu.go2
-rw-r--r--src/runtime/traceevent.go2
-rw-r--r--src/runtime/traceruntime.go4
-rw-r--r--src/runtime/tracestack.go2
-rw-r--r--src/runtime/tracetype.go2
13 files changed, 29 insertions, 29 deletions
diff --git a/src/runtime/heapdump.go b/src/runtime/heapdump.go
index 5476035b2e..72878d0728 100644
--- a/src/runtime/heapdump.go
+++ b/src/runtime/heapdump.go
@@ -460,7 +460,7 @@ func dumproots() {
continue
}
spf := (*specialfinalizer)(unsafe.Pointer(sp))
- p := unsafe.Pointer(s.base() + uintptr(spf.special.offset))
+ p := unsafe.Pointer(s.base() + spf.special.offset)
dumpfinalizer(p, spf.fn, spf.fint, spf.ot)
}
}
@@ -659,7 +659,7 @@ func dumpmemprof() {
continue
}
spp := (*specialprofile)(unsafe.Pointer(sp))
- p := s.base() + uintptr(spp.special.offset)
+ p := s.base() + spp.special.offset
dumpint(tagAllocSample)
dumpint(uint64(p))
dumpint(uint64(uintptr(unsafe.Pointer(spp.b))))
diff --git a/src/runtime/mcleanup.go b/src/runtime/mcleanup.go
index c368730c57..383217aa05 100644
--- a/src/runtime/mcleanup.go
+++ b/src/runtime/mcleanup.go
@@ -173,14 +173,14 @@ func (c Cleanup) Stop() {
// Reached the end of the linked list. Stop searching at this point.
break
}
- if offset == uintptr(s.offset) && _KindSpecialCleanup == s.kind &&
+ if offset == s.offset && _KindSpecialCleanup == s.kind &&
(*specialCleanup)(unsafe.Pointer(s)).id == c.id {
// The special is a cleanup and contains a matching cleanup id.
*iter = s.next
found = s
break
}
- if offset < uintptr(s.offset) || (offset == uintptr(s.offset) && _KindSpecialCleanup < s.kind) {
+ if offset < s.offset || (offset == s.offset && _KindSpecialCleanup < s.kind) {
// The special is outside the region specified for that kind of
// special. The specials are sorted by kind.
break
diff --git a/src/runtime/mgcmark.go b/src/runtime/mgcmark.go
index a0087ab6e0..8b306045c5 100644
--- a/src/runtime/mgcmark.go
+++ b/src/runtime/mgcmark.go
@@ -415,7 +415,7 @@ func gcScanFinalizer(spf *specialfinalizer, s *mspan, gcw *gcWork) {
// Don't mark finalized object, but scan it so we retain everything it points to.
// A finalizer can be set for an inner byte of an object, find object beginning.
- p := s.base() + uintptr(spf.special.offset)/s.elemsize*s.elemsize
+ p := s.base() + spf.special.offset/s.elemsize*s.elemsize
// Mark everything that can be reached from
// the object (but *not* the object itself or
diff --git a/src/runtime/mgcsweep.go b/src/runtime/mgcsweep.go
index 1605c21966..b72cc461ba 100644
--- a/src/runtime/mgcsweep.go
+++ b/src/runtime/mgcsweep.go
@@ -553,7 +553,7 @@ func (sl *sweepLocked) sweep(preserve bool) bool {
siter := newSpecialsIter(s)
for siter.valid() {
// A finalizer can be set for an inner byte of an object, find object beginning.
- objIndex := uintptr(siter.s.offset) / size
+ objIndex := siter.s.offset / size
p := s.base() + objIndex*size
mbits := s.markBitsForIndex(objIndex)
if !mbits.isMarked() {
@@ -561,7 +561,7 @@ func (sl *sweepLocked) sweep(preserve bool) bool {
// Pass 1: see if it has a finalizer.
hasFinAndRevived := false
endOffset := p - s.base() + size
- for tmp := siter.s; tmp != nil && uintptr(tmp.offset) < endOffset; tmp = tmp.next {
+ for tmp := siter.s; tmp != nil && tmp.offset < endOffset; tmp = tmp.next {
if tmp.kind == _KindSpecialFinalizer {
// Stop freeing of object if it has a finalizer.
mbits.setMarkedNonAtomic()
@@ -573,11 +573,11 @@ func (sl *sweepLocked) sweep(preserve bool) bool {
// Pass 2: queue all finalizers and clear any weak handles. Weak handles are cleared
// before finalization as specified by the weak package. See the documentation
// for that package for more details.
- for siter.valid() && uintptr(siter.s.offset) < endOffset {
+ for siter.valid() && siter.s.offset < endOffset {
// Find the exact byte for which the special was setup
// (as opposed to object beginning).
special := siter.s
- p := s.base() + uintptr(special.offset)
+ p := s.base() + special.offset
if special.kind == _KindSpecialFinalizer || special.kind == _KindSpecialWeakHandle {
siter.unlinkAndNext()
freeSpecial(special, unsafe.Pointer(p), size)
@@ -589,11 +589,11 @@ func (sl *sweepLocked) sweep(preserve bool) bool {
}
} else {
// Pass 2: the object is truly dead, free (and handle) all specials.
- for siter.valid() && uintptr(siter.s.offset) < endOffset {
+ for siter.valid() && siter.s.offset < endOffset {
// Find the exact byte for which the special was setup
// (as opposed to object beginning).
special := siter.s
- p := s.base() + uintptr(special.offset)
+ p := s.base() + special.offset
siter.unlinkAndNext()
freeSpecial(special, unsafe.Pointer(p), size)
}
diff --git a/src/runtime/mheap.go b/src/runtime/mheap.go
index d8193ddb46..cb0d340048 100644
--- a/src/runtime/mheap.go
+++ b/src/runtime/mheap.go
@@ -1488,7 +1488,7 @@ func (h *mheap) initSpan(s *mspan, typ spanAllocType, spanclass spanClass, base,
s.allocBits = newAllocBits(uintptr(s.nelems))
// Adjust s.limit down to the object-containing part of the span.
- s.limit = s.base() + uintptr(s.elemsize)*uintptr(s.nelems)
+ s.limit = s.base() + s.elemsize*uintptr(s.nelems)
// It's safe to access h.sweepgen without the heap lock because it's
// only ever updated with the world stopped and we run on the
@@ -2152,11 +2152,11 @@ func (span *mspan) specialFindSplicePoint(offset uintptr, kind byte) (**special,
if s == nil {
break
}
- if offset == uintptr(s.offset) && kind == s.kind {
+ if offset == s.offset && kind == s.kind {
found = true
break
}
- if offset < uintptr(s.offset) || (offset == uintptr(s.offset) && kind < s.kind) {
+ if offset < s.offset || (offset == s.offset && kind < s.kind) {
break
}
iter = &s.next
@@ -2323,14 +2323,14 @@ func getCleanupContext(ptr uintptr, cleanupID uint64) *specialCheckFinalizer {
// Reached the end of the linked list. Stop searching at this point.
break
}
- if offset == uintptr(s.offset) && _KindSpecialCheckFinalizer == s.kind &&
+ if offset == s.offset && _KindSpecialCheckFinalizer == s.kind &&
(*specialCheckFinalizer)(unsafe.Pointer(s)).cleanupID == cleanupID {
// The special is a cleanup and contains a matching cleanup id.
*iter = s.next
found = (*specialCheckFinalizer)(unsafe.Pointer(s))
break
}
- if offset < uintptr(s.offset) || (offset == uintptr(s.offset) && _KindSpecialCheckFinalizer < s.kind) {
+ if offset < s.offset || (offset == s.offset && _KindSpecialCheckFinalizer < s.kind) {
// The special is outside the region specified for that kind of
// special. The specials are sorted by kind.
break
@@ -2373,14 +2373,14 @@ func clearCleanupContext(ptr uintptr, cleanupID uint64) {
// Reached the end of the linked list. Stop searching at this point.
break
}
- if offset == uintptr(s.offset) && _KindSpecialCheckFinalizer == s.kind &&
+ if offset == s.offset && _KindSpecialCheckFinalizer == s.kind &&
(*specialCheckFinalizer)(unsafe.Pointer(s)).cleanupID == cleanupID {
// The special is a cleanup and contains a matching cleanup id.
*iter = s.next
found = s
break
}
- if offset < uintptr(s.offset) || (offset == uintptr(s.offset) && _KindSpecialCheckFinalizer < s.kind) {
+ if offset < s.offset || (offset == s.offset && _KindSpecialCheckFinalizer < s.kind) {
// The special is outside the region specified for that kind of
// special. The specials are sorted by kind.
break
@@ -2476,7 +2476,7 @@ type specialWeakHandle struct {
//go:linkname internal_weak_runtime_registerWeakPointer weak.runtime_registerWeakPointer
func internal_weak_runtime_registerWeakPointer(p unsafe.Pointer) unsafe.Pointer {
- return unsafe.Pointer(getOrAddWeakHandle(unsafe.Pointer(p)))
+ return unsafe.Pointer(getOrAddWeakHandle(p))
}
//go:linkname internal_weak_runtime_makeStrongFromWeak weak.runtime_makeStrongFromWeak
diff --git a/src/runtime/slice.go b/src/runtime/slice.go
index 79d3f6c0de..e31d5dccb2 100644
--- a/src/runtime/slice.go
+++ b/src/runtime/slice.go
@@ -397,5 +397,5 @@ func bytealg_MakeNoZero(len int) []byte {
panicmakeslicelen()
}
cap := roundupsize(uintptr(len), true)
- return unsafe.Slice((*byte)(mallocgc(uintptr(cap), nil, false)), cap)[:len]
+ return unsafe.Slice((*byte)(mallocgc(cap, nil, false)), cap)[:len]
}
diff --git a/src/runtime/traceallocfree.go b/src/runtime/traceallocfree.go
index 70e48ea3a6..b1b6c63462 100644
--- a/src/runtime/traceallocfree.go
+++ b/src/runtime/traceallocfree.go
@@ -37,7 +37,7 @@ func traceSnapshotMemory(gen uintptr) {
}
// Emit info.
- w.varint(uint64(trace.minPageHeapAddr))
+ w.varint(trace.minPageHeapAddr)
w.varint(uint64(pageSize))
w.varint(uint64(gc.MinHeapAlign))
w.varint(uint64(fixedStack))
diff --git a/src/runtime/tracebuf.go b/src/runtime/tracebuf.go
index 08a1d46838..5adaede424 100644
--- a/src/runtime/tracebuf.go
+++ b/src/runtime/tracebuf.go
@@ -183,7 +183,7 @@ func (w traceWriter) refill() traceWriter {
// Tolerate a nil mp.
mID := ^uint64(0)
if w.mp != nil {
- mID = uint64(w.mp.procid)
+ mID = w.mp.procid
}
// Write the buffer's header.
@@ -194,7 +194,7 @@ func (w traceWriter) refill() traceWriter {
w.byte(byte(w.exp))
}
w.varint(uint64(w.gen))
- w.varint(uint64(mID))
+ w.varint(mID)
w.varint(uint64(ts))
w.traceBuf.lenPos = w.varintReserve()
return w
diff --git a/src/runtime/tracecpu.go b/src/runtime/tracecpu.go
index 092c707f83..e64ca32cdf 100644
--- a/src/runtime/tracecpu.go
+++ b/src/runtime/tracecpu.go
@@ -258,7 +258,7 @@ func traceCPUSample(gp *g, mp *m, pp *p, stk []uintptr) {
if gp != nil {
hdr[1] = gp.goid
}
- hdr[2] = uint64(mp.procid)
+ hdr[2] = mp.procid
// Allow only one writer at a time
for !trace.signalLock.CompareAndSwap(0, 1) {
diff --git a/src/runtime/traceevent.go b/src/runtime/traceevent.go
index 263847be2e..b0bc4c017d 100644
--- a/src/runtime/traceevent.go
+++ b/src/runtime/traceevent.go
@@ -42,7 +42,7 @@ func (tl traceLocker) eventWriter(goStatus tracev2.GoStatus, procStatus tracev2.
tl.writer().writeProcStatus(uint64(pp.id), procStatus, pp.trace.inSweep).end()
}
if gp := tl.mp.curg; gp != nil && !gp.trace.statusWasTraced(tl.gen) && gp.trace.acquireStatus(tl.gen) {
- tl.writer().writeGoStatus(uint64(gp.goid), int64(tl.mp.procid), goStatus, gp.inMarkAssist, 0 /* no stack */).end()
+ tl.writer().writeGoStatus(gp.goid, int64(tl.mp.procid), goStatus, gp.inMarkAssist, 0 /* no stack */).end()
}
return traceEventWriter{tl}
}
diff --git a/src/runtime/traceruntime.go b/src/runtime/traceruntime.go
index a2775a3427..06e36fd802 100644
--- a/src/runtime/traceruntime.go
+++ b/src/runtime/traceruntime.go
@@ -457,7 +457,7 @@ func (tl traceLocker) GoPreempt() {
// GoStop emits a GoStop event with the provided reason.
func (tl traceLocker) GoStop(reason traceGoStopReason) {
- tl.eventWriter(tracev2.GoRunning, tracev2.ProcRunning).event(tracev2.EvGoStop, traceArg(trace.goStopReasons[tl.gen%2][reason]), tl.stack(0))
+ tl.eventWriter(tracev2.GoRunning, tracev2.ProcRunning).event(tracev2.EvGoStop, trace.goStopReasons[tl.gen%2][reason], tl.stack(0))
}
// GoPark emits a GoBlock event with the provided reason.
@@ -465,7 +465,7 @@ func (tl traceLocker) GoStop(reason traceGoStopReason) {
// TODO(mknyszek): Replace traceBlockReason with waitReason. It's silly
// that we have both, and waitReason is way more descriptive.
func (tl traceLocker) GoPark(reason traceBlockReason, skip int) {
- tl.eventWriter(tracev2.GoRunning, tracev2.ProcRunning).event(tracev2.EvGoBlock, traceArg(trace.goBlockReasons[tl.gen%2][reason]), tl.stack(skip))
+ tl.eventWriter(tracev2.GoRunning, tracev2.ProcRunning).event(tracev2.EvGoBlock, trace.goBlockReasons[tl.gen%2][reason], tl.stack(skip))
}
// GoUnpark emits a GoUnblock event.
diff --git a/src/runtime/tracestack.go b/src/runtime/tracestack.go
index 76d6b05048..51f3c29445 100644
--- a/src/runtime/tracestack.go
+++ b/src/runtime/tracestack.go
@@ -190,7 +190,7 @@ func dumpStacksRec(node *traceMapNode, w traceWriter, stackBuf []uintptr) traceW
// Emit stack event.
w.byte(byte(tracev2.EvStack))
- w.varint(uint64(node.id))
+ w.varint(node.id)
w.varint(uint64(len(frames)))
for _, frame := range frames {
w.varint(uint64(frame.PC))
diff --git a/src/runtime/tracetype.go b/src/runtime/tracetype.go
index f54f812578..613fc88202 100644
--- a/src/runtime/tracetype.go
+++ b/src/runtime/tracetype.go
@@ -64,7 +64,7 @@ func dumpTypesRec(node *traceMapNode, w traceWriter) traceWriter {
}
// Emit type.
- w.varint(uint64(node.id))
+ w.varint(node.id)
w.varint(uint64(uintptr(unsafe.Pointer(typ))))
w.varint(uint64(typ.Size()))
w.varint(uint64(typ.PtrBytes))