about summary refs log tree commit diff
path: root/src
diff options
context:
space:
mode:
author    Jakub Ciolek <jakub@ciolek.dev>  2025-01-04 00:41:25 +0100
committer Jorropo <jorropo.pgm@gmail.com>  2026-03-01 21:54:24 -0800
commit    5c595a811eec69761f288b0affeeacdcaf1e5e86 (patch)
tree      13ee6eea14842534181f4942049b24a36b76b968 /src
parent    f6be78b539d0a5b81fdb68ddf02eb48eee140287 (diff)
download  go-5c595a811eec69761f288b0affeeacdcaf1e5e86.tar.xz
cmd/compile: combine some generic AMD64 simplifications
Saves a few lines. If applicable, we also directly rewrite to 32 bit
MOVLconst, skipping the redundant transformation.

Change-Id: I4c2f5e2bb480e798cbe373de608e19a951d168ff
Reviewed-on: https://go-review.googlesource.com/c/go/+/640215
Reviewed-by: Keith Randall <khr@golang.org>
Reviewed-by: Cherry Mui <cherryyz@google.com>
Reviewed-by: Keith Randall <khr@google.com>
LUCI-TryBot-Result: Go LUCI <golang-scoped@luci-project-accounts.iam.gserviceaccount.com>
Diffstat (limited to 'src')
-rw-r--r--  src/cmd/compile/internal/ssa/_gen/AMD64.rules  52
-rw-r--r--  src/cmd/compile/internal/ssa/rewriteAMD64.go   70
2 files changed, 44 insertions, 78 deletions
diff --git a/src/cmd/compile/internal/ssa/_gen/AMD64.rules b/src/cmd/compile/internal/ssa/_gen/AMD64.rules
index 7ea338ad54..3c4803cd51 100644
--- a/src/cmd/compile/internal/ssa/_gen/AMD64.rules
+++ b/src/cmd/compile/internal/ssa/_gen/AMD64.rules
@@ -1288,20 +1288,13 @@
(SETAEstore [off] {sym} ptr (FlagGT_UGT) mem) => (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [1]) mem)
// Remove redundant *const ops
-(ADDQconst [0] x) => x
-(ADDLconst [c] x) && c==0 => x
-(SUBQconst [0] x) => x
-(SUBLconst [c] x) && c==0 => x
-(ANDQconst [0] _) => (MOVQconst [0])
-(ANDLconst [c] _) && c==0 => (MOVLconst [0])
-(ANDQconst [-1] x) => x
-(ANDLconst [c] x) && c==-1 => x
-(ORQconst [0] x) => x
-(ORLconst [c] x) && c==0 => x
-(ORQconst [-1] _) => (MOVQconst [-1])
-(ORLconst [c] _) && c==-1 => (MOVLconst [-1])
-(XORQconst [0] x) => x
-(XORLconst [c] x) && c==0 => x
+(ADD(Q|L)const [0] x) => x
+(SUB(Q|L)const [0] x) => x
+(AND(Q|L)const [0] _) => (MOVLconst [0])
+(AND(Q|L)const [-1] x) => x
+(OR(Q|L)const [0] x) => x
+(OR(Q|L)const [-1] _) => (MOV(Q|L)const [-1])
+(XOR(Q|L)const [0] x) => x
// TODO: since we got rid of the W/B versions, we might miss
// things like (ANDLconst [0x100] x) which were formerly
// (ANDBconst [0] x). Probably doesn't happen very often.
@@ -1329,8 +1322,7 @@
(SARLconst [c] (MOVQconst [d])) => (MOVQconst [int64(int32(d))>>uint64(c)])
(SARWconst [c] (MOVQconst [d])) => (MOVQconst [int64(int16(d))>>uint64(c)])
(SARBconst [c] (MOVQconst [d])) => (MOVQconst [int64(int8(d))>>uint64(c)])
-(NEGQ (MOVQconst [c])) => (MOVQconst [-c])
-(NEGL (MOVLconst [c])) => (MOVLconst [-c])
+(NEG(Q|L) (MOV(Q|L)const [c])) => (MOV(Q|L)const [-c])
(MULQconst [c] (MOVQconst [d])) => (MOVQconst [int64(c)*d])
(MULLconst [c] (MOVLconst [d])) => (MOVLconst [c*d])
(ANDQconst [c] (MOVQconst [d])) => (MOVQconst [int64(c)&d])
@@ -1339,8 +1331,7 @@
(ORLconst [c] (MOVLconst [d])) => (MOVLconst [c|d])
(XORQconst [c] (MOVQconst [d])) => (MOVQconst [int64(c)^d])
(XORLconst [c] (MOVLconst [d])) => (MOVLconst [c^d])
-(NOTQ (MOVQconst [c])) => (MOVQconst [^c])
-(NOTL (MOVLconst [c])) => (MOVLconst [^c])
+(NOT(Q|L) (MOV(Q|L)const [c])) => (MOV(Q|L)const [^c])
(BTSQconst [c] (MOVQconst [d])) => (MOVQconst [d|(1<<uint32(c))])
(BTRQconst [c] (MOVQconst [d])) => (MOVQconst [d&^(1<<uint32(c))])
(BTCQconst [c] (MOVQconst [d])) => (MOVQconst [d^(1<<uint32(c))])
@@ -1353,16 +1344,11 @@
// generic simplifications
// TODO: more of this
-(ADDQ x (NEGQ y)) => (SUBQ x y)
-(ADDL x (NEGL y)) => (SUBL x y)
-(SUBQ x x) => (MOVQconst [0])
-(SUBL x x) => (MOVLconst [0])
-(ANDQ x x) => x
-(ANDL x x) => x
-(ORQ x x) => x
-(ORL x x) => x
-(XORQ x x) => (MOVQconst [0])
-(XORL x x) => (MOVLconst [0])
+(ADD(Q|L) x (NEG(Q|L) y)) => (SUB(Q|L) x y)
+(SUB(Q|L) x x) => (MOVLconst [0])
+(AND(Q|L) x x) => x
+(OR(Q|L) x x) => x
+(XOR(Q|L) x x) => (MOVLconst [0])
(SHLLconst [d] (MOVLconst [c])) => (MOVLconst [c << uint64(d)])
(SHLQconst [d] (MOVQconst [c])) => (MOVQconst [c << uint64(d)])
@@ -1373,10 +1359,7 @@
(MULQconst [c] (NEGQ x)) && c != -(1<<31) => (MULQconst [-c] x)
// checking AND against 0.
-(CMPQconst a:(ANDQ x y) [0]) && a.Uses == 1 => (TESTQ x y)
-(CMPLconst a:(ANDL x y) [0]) && a.Uses == 1 => (TESTL x y)
-(CMPWconst a:(ANDL x y) [0]) && a.Uses == 1 => (TESTW x y)
-(CMPBconst a:(ANDL x y) [0]) && a.Uses == 1 => (TESTB x y)
+(CMP(Q|L|W|B)const a:(AND(Q|L|L|L) x y) [0]) && a.Uses == 1 => (TEST(Q|L|W|B) x y)
(CMPQconst a:(ANDQconst [c] x) [0]) && a.Uses == 1 => (TESTQconst [c] x)
(CMPLconst a:(ANDLconst [c] x) [0]) && a.Uses == 1 => (TESTLconst [c] x)
(CMPWconst a:(ANDLconst [c] x) [0]) && a.Uses == 1 => (TESTWconst [int16(c)] x)
@@ -1389,10 +1372,7 @@
(TESTB (MOVLconst [c]) x) => (TESTBconst [int8(c)] x)
// TEST %reg,%reg is shorter than CMP
-(CMPQconst x [0]) => (TESTQ x x)
-(CMPLconst x [0]) => (TESTL x x)
-(CMPWconst x [0]) => (TESTW x x)
-(CMPBconst x [0]) => (TESTB x x)
+(CMP(Q|L|W|B)const x [0]) => (TEST(Q|L|W|B) x x)
(TESTQconst [-1] x) && x.Op != OpAMD64MOVQconst => (TESTQ x x)
(TESTLconst [-1] x) && x.Op != OpAMD64MOVLconst => (TESTL x x)
(TESTWconst [-1] x) && x.Op != OpAMD64MOVLconst => (TESTW x x)
diff --git a/src/cmd/compile/internal/ssa/rewriteAMD64.go b/src/cmd/compile/internal/ssa/rewriteAMD64.go
index 918a20c542..dc0a3abb8b 100644
--- a/src/cmd/compile/internal/ssa/rewriteAMD64.go
+++ b/src/cmd/compile/internal/ssa/rewriteAMD64.go
@@ -6793,15 +6793,13 @@ func rewriteValueAMD64_OpAMD64ADDLconst(v *Value) bool {
v.AddArg2(x, y)
return true
}
- // match: (ADDLconst [c] x)
- // cond: c==0
+ // match: (ADDLconst [0] x)
// result: x
for {
- c := auxIntToInt32(v.AuxInt)
- x := v_0
- if !(c == 0) {
+ if auxIntToInt32(v.AuxInt) != 0 {
break
}
+ x := v_0
v.copyOf(x)
return true
}
@@ -8032,27 +8030,23 @@ func rewriteValueAMD64_OpAMD64ANDLconst(v *Value) bool {
v.AddArg(x)
return true
}
- // match: (ANDLconst [c] _)
- // cond: c==0
+ // match: (ANDLconst [0] _)
// result: (MOVLconst [0])
for {
- c := auxIntToInt32(v.AuxInt)
- if !(c == 0) {
+ if auxIntToInt32(v.AuxInt) != 0 {
break
}
v.reset(OpAMD64MOVLconst)
v.AuxInt = int32ToAuxInt(0)
return true
}
- // match: (ANDLconst [c] x)
- // cond: c==-1
+ // match: (ANDLconst [-1] x)
// result: x
for {
- c := auxIntToInt32(v.AuxInt)
- x := v_0
- if !(c == -1) {
+ if auxIntToInt32(v.AuxInt) != -1 {
break
}
+ x := v_0
v.copyOf(x)
return true
}
@@ -8481,13 +8475,13 @@ func rewriteValueAMD64_OpAMD64ANDQconst(v *Value) bool {
return true
}
// match: (ANDQconst [0] _)
- // result: (MOVQconst [0])
+ // result: (MOVLconst [0])
for {
if auxIntToInt32(v.AuxInt) != 0 {
break
}
- v.reset(OpAMD64MOVQconst)
- v.AuxInt = int64ToAuxInt(0)
+ v.reset(OpAMD64MOVLconst)
+ v.AuxInt = int32ToAuxInt(0)
return true
}
// match: (ANDQconst [-1] x)
@@ -19607,24 +19601,20 @@ func rewriteValueAMD64_OpAMD64ORLconst(v *Value) bool {
v.AddArg(x)
return true
}
- // match: (ORLconst [c] x)
- // cond: c==0
+ // match: (ORLconst [0] x)
// result: x
for {
- c := auxIntToInt32(v.AuxInt)
- x := v_0
- if !(c == 0) {
+ if auxIntToInt32(v.AuxInt) != 0 {
break
}
+ x := v_0
v.copyOf(x)
return true
}
- // match: (ORLconst [c] _)
- // cond: c==-1
+ // match: (ORLconst [-1] _)
// result: (MOVLconst [-1])
for {
- c := auxIntToInt32(v.AuxInt)
- if !(c == -1) {
+ if auxIntToInt32(v.AuxInt) != -1 {
break
}
v.reset(OpAMD64MOVLconst)
@@ -27458,15 +27448,13 @@ func rewriteValueAMD64_OpAMD64SUBL(v *Value) bool {
}
func rewriteValueAMD64_OpAMD64SUBLconst(v *Value) bool {
v_0 := v.Args[0]
- // match: (SUBLconst [c] x)
- // cond: c==0
+ // match: (SUBLconst [0] x)
// result: x
for {
- c := auxIntToInt32(v.AuxInt)
- x := v_0
- if !(c == 0) {
+ if auxIntToInt32(v.AuxInt) != 0 {
break
}
+ x := v_0
v.copyOf(x)
return true
}
@@ -27646,14 +27634,14 @@ func rewriteValueAMD64_OpAMD64SUBQ(v *Value) bool {
return true
}
// match: (SUBQ x x)
- // result: (MOVQconst [0])
+ // result: (MOVLconst [0])
for {
x := v_0
if x != v_1 {
break
}
- v.reset(OpAMD64MOVQconst)
- v.AuxInt = int64ToAuxInt(0)
+ v.reset(OpAMD64MOVLconst)
+ v.AuxInt = int32ToAuxInt(0)
return true
}
// match: (SUBQ x l:(MOVQload [off] {sym} ptr mem))
@@ -64987,15 +64975,13 @@ func rewriteValueAMD64_OpAMD64XORLconst(v *Value) bool {
v.AddArg(x)
return true
}
- // match: (XORLconst [c] x)
- // cond: c==0
+ // match: (XORLconst [0] x)
// result: x
for {
- c := auxIntToInt32(v.AuxInt)
- x := v_0
- if !(c == 0) {
+ if auxIntToInt32(v.AuxInt) != 0 {
break
}
+ x := v_0
v.copyOf(x)
return true
}
@@ -65248,14 +65234,14 @@ func rewriteValueAMD64_OpAMD64XORQ(v *Value) bool {
break
}
// match: (XORQ x x)
- // result: (MOVQconst [0])
+ // result: (MOVLconst [0])
for {
x := v_0
if x != v_1 {
break
}
- v.reset(OpAMD64MOVQconst)
- v.AuxInt = int64ToAuxInt(0)
+ v.reset(OpAMD64MOVLconst)
+ v.AuxInt = int32ToAuxInt(0)
return true
}
// match: (XORQ x l:(MOVQload [off] {sym} ptr mem))