about summary refs log tree commit diff
path: root/src/cmd/compile
diff options
context:
space:
mode:
author    Jes Cok <xigua67damn@gmail.com>  2025-10-19 19:53:27 +0000
committer Gopher Robot <gobot@golang.org>  2025-10-21 08:13:08 -0700
commit    a5a249d6a64508376320bc48546a6a43aebecda7 (patch)
tree      707720726df7c69b68536f55154a5fb0c2704763 /src/cmd/compile
parent    694182d77b1a0e3676214ad0e361bdbdafde33a1 (diff)
download  go-a5a249d6a64508376320bc48546a6a43aebecda7.tar.xz
all: eliminate unnecessary type conversions
Found by github.com/mdempsky/unconvert

Change-Id: I88ce10390a49ba768a4deaa0df9057c93c1164de
GitHub-Last-Rev: 3b0f7e8f74f58340637f33287c238765856b2483
GitHub-Pull-Request: golang/go#75974
Reviewed-on: https://go-review.googlesource.com/c/go/+/712940
LUCI-TryBot-Result: Go LUCI <golang-scoped@luci-project-accounts.iam.gserviceaccount.com>
Reviewed-by: Keith Randall <khr@golang.org>
Auto-Submit: Keith Randall <khr@golang.org>
Reviewed-by: Keith Randall <khr@google.com>
Reviewed-by: David Chase <drchase@google.com>
Diffstat (limited to 'src/cmd/compile')
-rw-r--r--  src/cmd/compile/internal/coverage/cover.go       |  2
-rw-r--r--  src/cmd/compile/internal/inline/inl.go           |  2
-rw-r--r--  src/cmd/compile/internal/ir/bitset.go            |  2
-rw-r--r--  src/cmd/compile/internal/loong64/ssa.go          |  4
-rw-r--r--  src/cmd/compile/internal/noder/reader.go         |  2
-rw-r--r--  src/cmd/compile/internal/ppc64/ssa.go            |  2
-rw-r--r--  src/cmd/compile/internal/ssa/biasedsparsemap.go  |  2
-rw-r--r--  src/cmd/compile/internal/ssa/debug.go            |  8
-rw-r--r--  src/cmd/compile/internal/ssa/magic_test.go       | 28
-rw-r--r--  src/cmd/compile/internal/ssa/memcombine.go       |  2
-rw-r--r--  src/cmd/compile/internal/ssa/rewrite.go          | 16
-rw-r--r--  src/cmd/compile/internal/ssa/sccp.go             |  2
-rw-r--r--  src/cmd/compile/internal/test/divconst_test.go   |  8
13 files changed, 40 insertions, 40 deletions
diff --git a/src/cmd/compile/internal/coverage/cover.go b/src/cmd/compile/internal/coverage/cover.go
index 51f934f060..5ecd5271f6 100644
--- a/src/cmd/compile/internal/coverage/cover.go
+++ b/src/cmd/compile/internal/coverage/cover.go
@@ -131,7 +131,7 @@ func metaHashAndLen() ([16]byte, int) {
}
var hv [16]byte
for i := 0; i < 16; i++ {
- nib := string(mhash[i*2 : i*2+2])
+ nib := mhash[i*2 : i*2+2]
x, err := strconv.ParseInt(nib, 16, 32)
if err != nil {
base.Fatalf("metahash bad byte %q", nib)
diff --git a/src/cmd/compile/internal/inline/inl.go b/src/cmd/compile/internal/inline/inl.go
index 9f03f6404a..3a9243e029 100644
--- a/src/cmd/compile/internal/inline/inl.go
+++ b/src/cmd/compile/internal/inline/inl.go
@@ -289,7 +289,7 @@ func CanInline(fn *ir.Func, profile *pgoir.Profile) {
// function is inlinable.
func noteInlinableFunc(n *ir.Name, fn *ir.Func, cost int32) {
if base.Flag.LowerM > 1 {
- fmt.Printf("%v: can inline %v with cost %d as: %v { %v }\n", ir.Line(fn), n, cost, fn.Type(), ir.Nodes(fn.Body))
+ fmt.Printf("%v: can inline %v with cost %d as: %v { %v }\n", ir.Line(fn), n, cost, fn.Type(), fn.Body)
} else if base.Flag.LowerM != 0 {
fmt.Printf("%v: can inline %v\n", ir.Line(fn), n)
}
diff --git a/src/cmd/compile/internal/ir/bitset.go b/src/cmd/compile/internal/ir/bitset.go
index bae4005866..339e4e524f 100644
--- a/src/cmd/compile/internal/ir/bitset.go
+++ b/src/cmd/compile/internal/ir/bitset.go
@@ -23,7 +23,7 @@ func (f *bitset8) set2(shift uint8, b uint8) {
// Clear old bits.
*(*uint8)(f) &^= 3 << shift
// Set new bits.
- *(*uint8)(f) |= uint8(b&3) << shift
+ *(*uint8)(f) |= (b & 3) << shift
}
type bitset16 uint16
diff --git a/src/cmd/compile/internal/loong64/ssa.go b/src/cmd/compile/internal/loong64/ssa.go
index bd0d96a695..5e55428283 100644
--- a/src/cmd/compile/internal/loong64/ssa.go
+++ b/src/cmd/compile/internal/loong64/ssa.go
@@ -1175,8 +1175,8 @@ func ssaGenValue(s *ssagen.State, v *ssa.Value) {
p.From.Type = obj.TYPE_MEM
p.From.Reg = v.Args[0].Reg()
p.AddRestSourceArgs([]obj.Addr{
- {Type: obj.TYPE_CONST, Offset: int64((v.AuxInt >> 5) & 0x1fffffffff)},
- {Type: obj.TYPE_CONST, Offset: int64((v.AuxInt >> 0) & 0x1f)},
+ {Type: obj.TYPE_CONST, Offset: (v.AuxInt >> 5) & 0x1fffffffff},
+ {Type: obj.TYPE_CONST, Offset: (v.AuxInt >> 0) & 0x1f},
})
case ssa.OpLOONG64ADDshiftLLV:
diff --git a/src/cmd/compile/internal/noder/reader.go b/src/cmd/compile/internal/noder/reader.go
index a8a45b0269..d7dd58d8ca 100644
--- a/src/cmd/compile/internal/noder/reader.go
+++ b/src/cmd/compile/internal/noder/reader.go
@@ -3577,7 +3577,7 @@ func unifiedInlineCall(callerfn *ir.Func, call *ir.CallExpr, fn *ir.Func, inlInd
edit(r.curfn)
})
- body := ir.Nodes(r.curfn.Body)
+ body := r.curfn.Body
// Reparent any declarations into the caller function.
for _, name := range r.curfn.Dcl {
diff --git a/src/cmd/compile/internal/ppc64/ssa.go b/src/cmd/compile/internal/ppc64/ssa.go
index ace3024480..f0d228559f 100644
--- a/src/cmd/compile/internal/ppc64/ssa.go
+++ b/src/cmd/compile/internal/ppc64/ssa.go
@@ -631,7 +631,7 @@ func ssaGenValue(s *ssagen.State, v *ssa.Value) {
p := s.Prog(v.Op.Asm())
p.To = obj.Addr{Type: obj.TYPE_REG, Reg: v.Reg()}
p.Reg = v.Args[0].Reg()
- p.From = obj.Addr{Type: obj.TYPE_CONST, Offset: int64(sh)}
+ p.From = obj.Addr{Type: obj.TYPE_CONST, Offset: sh}
p.AddRestSourceArgs([]obj.Addr{{Type: obj.TYPE_CONST, Offset: mb}, {Type: obj.TYPE_CONST, Offset: me}})
// Auxint holds mask
diff --git a/src/cmd/compile/internal/ssa/biasedsparsemap.go b/src/cmd/compile/internal/ssa/biasedsparsemap.go
index 25fbaf6862..a8bda831b1 100644
--- a/src/cmd/compile/internal/ssa/biasedsparsemap.go
+++ b/src/cmd/compile/internal/ssa/biasedsparsemap.go
@@ -31,7 +31,7 @@ func (s *biasedSparseMap) cap() int {
if s == nil || s.s == nil {
return 0
}
- return s.s.cap() + int(s.first)
+ return s.s.cap() + s.first
}
// size returns the number of entries stored in s
diff --git a/src/cmd/compile/internal/ssa/debug.go b/src/cmd/compile/internal/ssa/debug.go
index c9a3e4291c..7edc414bda 100644
--- a/src/cmd/compile/internal/ssa/debug.go
+++ b/src/cmd/compile/internal/ssa/debug.go
@@ -1553,11 +1553,11 @@ func (debugInfo *FuncDebug) PutLocationListDwarf4(list []byte, ctxt *obj.Link, l
}
if ctxt.UseBASEntries {
- listSym.WriteInt(ctxt, listSym.Size, ctxt.Arch.PtrSize, int64(begin))
- listSym.WriteInt(ctxt, listSym.Size, ctxt.Arch.PtrSize, int64(end))
+ listSym.WriteInt(ctxt, listSym.Size, ctxt.Arch.PtrSize, begin)
+ listSym.WriteInt(ctxt, listSym.Size, ctxt.Arch.PtrSize, end)
} else {
- listSym.WriteCURelativeAddr(ctxt, listSym.Size, startPC, int64(begin))
- listSym.WriteCURelativeAddr(ctxt, listSym.Size, startPC, int64(end))
+ listSym.WriteCURelativeAddr(ctxt, listSym.Size, startPC, begin)
+ listSym.WriteCURelativeAddr(ctxt, listSym.Size, startPC, end)
}
i += 2 * ctxt.Arch.PtrSize
diff --git a/src/cmd/compile/internal/ssa/magic_test.go b/src/cmd/compile/internal/ssa/magic_test.go
index 7c6009dea6..44177d679e 100644
--- a/src/cmd/compile/internal/ssa/magic_test.go
+++ b/src/cmd/compile/internal/ssa/magic_test.go
@@ -33,7 +33,7 @@ func testMagicExhaustive(t *testing.T, n uint) {
min := -int64(1) << (n - 1)
max := int64(1) << (n - 1)
for c := int64(1); c < max; c++ {
- if !smagicOK(n, int64(c)) {
+ if !smagicOK(n, c) {
continue
}
m := int64(smagic(n, c).m)
@@ -164,11 +164,11 @@ func TestMagicSigned(t *testing.T) {
if c>>(n-1) != 0 {
continue // not appropriate for the given n.
}
- if !smagicOK(n, int64(c)) {
+ if !smagicOK(n, c) {
t.Errorf("expected n=%d c=%d to pass\n", n, c)
}
- m := smagic(n, int64(c)).m
- s := smagic(n, int64(c)).s
+ m := smagic(n, c).m
+ s := smagic(n, c).s
C := new(big.Int).SetInt64(c)
M := new(big.Int).SetUint64(m)
@@ -308,13 +308,13 @@ func testDivisibleExhaustive(t *testing.T, n uint) {
minI := -int64(1) << (n - 1)
maxI := int64(1) << (n - 1)
for c := int64(1); c < maxI; c++ {
- if !sdivisibleOK(n, int64(c)) {
+ if !sdivisibleOK(n, c) {
continue
}
- k := sdivisible(n, int64(c)).k
- m := sdivisible(n, int64(c)).m
- a := sdivisible(n, int64(c)).a
- max := sdivisible(n, int64(c)).max
+ k := sdivisible(n, c).k
+ m := sdivisible(n, c).m
+ a := sdivisible(n, c).a
+ max := sdivisible(n, c).max
mask := ^uint64(0) >> (64 - n)
for i := minI; i < maxI; i++ {
want := i%c == 0
@@ -369,13 +369,13 @@ func TestDivisibleSigned(t *testing.T) {
if c>>(n-1) != 0 {
continue // not appropriate for the given n.
}
- if !sdivisibleOK(n, int64(c)) {
+ if !sdivisibleOK(n, c) {
t.Errorf("expected n=%d c=%d to pass\n", n, c)
}
- k := sdivisible(n, int64(c)).k
- m := sdivisible(n, int64(c)).m
- a := sdivisible(n, int64(c)).a
- max := sdivisible(n, int64(c)).max
+ k := sdivisible(n, c).k
+ m := sdivisible(n, c).m
+ a := sdivisible(n, c).a
+ max := sdivisible(n, c).max
mask := ^uint64(0) >> (64 - n)
C := new(big.Int).SetInt64(c)
diff --git a/src/cmd/compile/internal/ssa/memcombine.go b/src/cmd/compile/internal/ssa/memcombine.go
index b8fcd39495..6b1df7dc09 100644
--- a/src/cmd/compile/internal/ssa/memcombine.go
+++ b/src/cmd/compile/internal/ssa/memcombine.go
@@ -728,7 +728,7 @@ func combineStores(root *Value) {
if isLittleEndian && shift0 != 0 {
sv = rightShift(root.Block, root.Pos, sv, shift0)
}
- shiftedSize = int64(aTotalSize - a[0].size)
+ shiftedSize = aTotalSize - a[0].size
if isBigEndian && shift0-shiftedSize*8 != 0 {
sv = rightShift(root.Block, root.Pos, sv, shift0-shiftedSize*8)
}
diff --git a/src/cmd/compile/internal/ssa/rewrite.go b/src/cmd/compile/internal/ssa/rewrite.go
index f02019df38..9ea735a67d 100644
--- a/src/cmd/compile/internal/ssa/rewrite.go
+++ b/src/cmd/compile/internal/ssa/rewrite.go
@@ -726,7 +726,7 @@ func int32ToAuxInt(i int32) int64 {
return int64(i)
}
func int64ToAuxInt(i int64) int64 {
- return int64(i)
+ return i
}
func uint8ToAuxInt(i uint8) int64 {
return int64(int8(i))
@@ -1603,7 +1603,7 @@ func encodePPC64RotateMask(rotate, mask, nbits int64) int64 {
mb, me = men, mbn
}
- return int64(me) | int64(mb<<8) | int64(rotate<<16) | int64(nbits<<24)
+ return int64(me) | int64(mb<<8) | rotate<<16 | nbits<<24
}
// Merge (RLDICL [encoded] (SRDconst [s] x)) into (RLDICL [new_encoded] x)
@@ -1712,7 +1712,7 @@ func mergePPC64AndSldi(m, s int64) int64 {
func mergePPC64ClrlsldiSrw(sld, srw int64) int64 {
mask_1 := uint64(0xFFFFFFFF >> uint(srw))
// for CLRLSLDI, it's more convenient to think of it as a mask left bits then rotate left.
- mask_2 := uint64(0xFFFFFFFFFFFFFFFF) >> uint(GetPPC64Shiftmb(int64(sld)))
+ mask_2 := uint64(0xFFFFFFFFFFFFFFFF) >> uint(GetPPC64Shiftmb(sld))
// Rewrite mask to apply after the final left shift.
mask_3 := (mask_1 & mask_2) << uint(GetPPC64Shiftsh(sld))
@@ -1724,7 +1724,7 @@ func mergePPC64ClrlsldiSrw(sld, srw int64) int64 {
if uint64(uint32(mask_3)) != mask_3 || mask_3 == 0 {
return 0
}
- return encodePPC64RotateMask(int64(r_3), int64(mask_3), 32)
+ return encodePPC64RotateMask(r_3, int64(mask_3), 32)
}
// Test if a doubleword shift right feeding into a CLRLSLDI can be merged into RLWINM.
@@ -1732,7 +1732,7 @@ func mergePPC64ClrlsldiSrw(sld, srw int64) int64 {
func mergePPC64ClrlsldiSrd(sld, srd int64) int64 {
mask_1 := uint64(0xFFFFFFFFFFFFFFFF) >> uint(srd)
// for CLRLSLDI, it's more convenient to think of it as a mask left bits then rotate left.
- mask_2 := uint64(0xFFFFFFFFFFFFFFFF) >> uint(GetPPC64Shiftmb(int64(sld)))
+ mask_2 := uint64(0xFFFFFFFFFFFFFFFF) >> uint(GetPPC64Shiftmb(sld))
// Rewrite mask to apply after the final left shift.
mask_3 := (mask_1 & mask_2) << uint(GetPPC64Shiftsh(sld))
@@ -1749,7 +1749,7 @@ func mergePPC64ClrlsldiSrd(sld, srd int64) int64 {
if v1&mask_3 != 0 {
return 0
}
- return encodePPC64RotateMask(int64(r_3&31), int64(mask_3), 32)
+ return encodePPC64RotateMask(r_3&31, int64(mask_3), 32)
}
// Test if a RLWINM feeding into a CLRLSLDI can be merged into RLWINM. Return
@@ -2155,11 +2155,11 @@ func rewriteFixedLoad(v *Value, sym Sym, sb *Value, off int64) *Value {
switch f.Sym.Name {
case "Size_":
v.reset(ptrSizedOpConst)
- v.AuxInt = int64(t.Size())
+ v.AuxInt = t.Size()
return v
case "PtrBytes":
v.reset(ptrSizedOpConst)
- v.AuxInt = int64(types.PtrDataSize(t))
+ v.AuxInt = types.PtrDataSize(t)
return v
case "Hash":
v.reset(OpConst32)
diff --git a/src/cmd/compile/internal/ssa/sccp.go b/src/cmd/compile/internal/ssa/sccp.go
index ecc0f94e5b..107db23ed4 100644
--- a/src/cmd/compile/internal/ssa/sccp.go
+++ b/src/cmd/compile/internal/ssa/sccp.go
@@ -377,7 +377,7 @@ func (t *worklist) visitValue(val *Value) {
// re-visit all uses of value if its lattice is changed
newLt := t.getLatticeCell(val)
if !equals(newLt, oldLt) {
- if int8(oldLt.tag) > int8(newLt.tag) {
+ if oldLt.tag > newLt.tag {
t.f.Fatalf("Must lower lattice\n")
}
t.addUses(val)
diff --git a/src/cmd/compile/internal/test/divconst_test.go b/src/cmd/compile/internal/test/divconst_test.go
index 9358a60374..5e89ce9a3d 100644
--- a/src/cmd/compile/internal/test/divconst_test.go
+++ b/src/cmd/compile/internal/test/divconst_test.go
@@ -99,28 +99,28 @@ func BenchmarkDivconstU64(b *testing.B) {
x := uint64(123456789123456789)
for i := 0; i < b.N; i++ {
x += x << 4
- u64res = uint64(x) / 3
+ u64res = x / 3
}
})
b.Run("5", func(b *testing.B) {
x := uint64(123456789123456789)
for i := 0; i < b.N; i++ {
x += x << 4
- u64res = uint64(x) / 5
+ u64res = x / 5
}
})
b.Run("37", func(b *testing.B) {
x := uint64(123456789123456789)
for i := 0; i < b.N; i++ {
x += x << 4
- u64res = uint64(x) / 37
+ u64res = x / 37
}
})
b.Run("1234567", func(b *testing.B) {
x := uint64(123456789123456789)
for i := 0; i < b.N; i++ {
x += x << 4
- u64res = uint64(x) / 1234567
+ u64res = x / 1234567
}
})
}