From ea33523877e4c7a136f2db94a8b5bc4e40220be2 Mon Sep 17 00:00:00 2001 From: Xiangdong Ji Date: Tue, 8 Sep 2020 09:55:20 +0000 Subject: cmd/compile: rewrite some ARM64 rules to use typed aux Passes toolstash-check -all. Change-Id: I7ec36bc048f3031c8201107e6fc5d1257271dbf1 Reviewed-on: https://go-review.googlesource.com/c/go/+/234379 Run-TryBot: Alberto Donizetti TryBot-Result: Go Bot Trust: Alberto Donizetti Reviewed-by: Keith Randall --- src/cmd/compile/internal/ssa/gen/ARM64.rules | 496 +++++++-------- src/cmd/compile/internal/ssa/rewriteARM64.go | 912 +++++++++++++-------------- 2 files changed, 704 insertions(+), 704 deletions(-) (limited to 'src/cmd/compile/internal/ssa') diff --git a/src/cmd/compile/internal/ssa/gen/ARM64.rules b/src/cmd/compile/internal/ssa/gen/ARM64.rules index 311067e87a..c4a3532632 100644 --- a/src/cmd/compile/internal/ssa/gen/ARM64.rules +++ b/src/cmd/compile/internal/ssa/gen/ARM64.rules @@ -1116,280 +1116,280 @@ // if a register move has only 1 use, just use the same register without emitting instruction // MOVDnop doesn't emit instruction, only for ensuring the type. -(MOVDreg x) && x.Uses == 1 -> (MOVDnop x) +(MOVDreg x) && x.Uses == 1 => (MOVDnop x) // fold constant into arithmatic ops -(ADD x (MOVDconst [c])) -> (ADDconst [c] x) -(SUB x (MOVDconst [c])) -> (SUBconst [c] x) -(AND x (MOVDconst [c])) -> (ANDconst [c] x) -(OR x (MOVDconst [c])) -> (ORconst [c] x) -(XOR x (MOVDconst [c])) -> (XORconst [c] x) -(TST x (MOVDconst [c])) -> (TSTconst [c] x) -(TSTW x (MOVDconst [c])) -> (TSTWconst [c] x) -(CMN x (MOVDconst [c])) -> (CMNconst [c] x) -(CMNW x (MOVDconst [c])) -> (CMNWconst [c] x) -(BIC x (MOVDconst [c])) -> (ANDconst [^c] x) -(EON x (MOVDconst [c])) -> (XORconst [^c] x) -(ORN x (MOVDconst [c])) -> (ORconst [^c] x) - -(SLL x (MOVDconst [c])) -> (SLLconst x [c&63]) // Note: I don't think we ever generate bad constant shifts (i.e. c>=64) -(SRL x (MOVDconst [c])) -> (SRLconst x [c&63]) -(SRA x (MOVDconst [c])) -> (SRAconst x [c&63]) - -(CMP x (MOVDconst [c])) -> (CMPconst [c] x) -(CMP (MOVDconst [c]) x) -> (InvertFlags (CMPconst [c] x)) +(ADD x (MOVDconst [c])) => (ADDconst [c] x) +(SUB x (MOVDconst [c])) => (SUBconst [c] x) +(AND x (MOVDconst [c])) => (ANDconst [c] x) +(OR x (MOVDconst [c])) => (ORconst [c] x) +(XOR x (MOVDconst [c])) => (XORconst [c] x) +(TST x (MOVDconst [c])) => (TSTconst [c] x) +(TSTW x (MOVDconst [c])) => (TSTWconst [int32(c)] x) +(CMN x (MOVDconst [c])) => (CMNconst [c] x) +(CMNW x (MOVDconst [c])) => (CMNWconst [int32(c)] x) +(BIC x (MOVDconst [c])) => (ANDconst [^c] x) +(EON x (MOVDconst [c])) => (XORconst [^c] x) +(ORN x (MOVDconst [c])) => (ORconst [^c] x) + +(SLL x (MOVDconst [c])) => (SLLconst x [c&63]) // Note: I don't think we ever generate bad constant shifts (i.e. c>=64) +(SRL x (MOVDconst [c])) => (SRLconst x [c&63]) +(SRA x (MOVDconst [c])) => (SRAconst x [c&63]) + +(CMP x (MOVDconst [c])) => (CMPconst [c] x) +(CMP (MOVDconst [c]) x) => (InvertFlags (CMPconst [c] x)) (CMPW x (MOVDconst [c])) => (CMPWconst [int32(c)] x) (CMPW (MOVDconst [c]) x) => (InvertFlags (CMPWconst [int32(c)] x)) // Canonicalize the order of arguments to comparisons - helps with CSE. 
-((CMP|CMPW) x y) && x.ID > y.ID -> (InvertFlags ((CMP|CMPW) y x)) +((CMP|CMPW) x y) && x.ID > y.ID => (InvertFlags ((CMP|CMPW) y x)) -// mul-neg -> mneg -(NEG (MUL x y)) -> (MNEG x y) -(NEG (MULW x y)) -> (MNEGW x y) -(MUL (NEG x) y) -> (MNEG x y) -(MULW (NEG x) y) -> (MNEGW x y) +// mul-neg => mneg +(NEG (MUL x y)) => (MNEG x y) +(NEG (MULW x y)) => (MNEGW x y) +(MUL (NEG x) y) => (MNEG x y) +(MULW (NEG x) y) => (MNEGW x y) // madd/msub -(ADD a l:(MUL x y)) && l.Uses==1 && clobber(l) -> (MADD a x y) -(SUB a l:(MUL x y)) && l.Uses==1 && clobber(l) -> (MSUB a x y) -(ADD a l:(MNEG x y)) && l.Uses==1 && clobber(l) -> (MSUB a x y) -(SUB a l:(MNEG x y)) && l.Uses==1 && clobber(l) -> (MADD a x y) +(ADD a l:(MUL x y)) && l.Uses==1 && clobber(l) => (MADD a x y) +(SUB a l:(MUL x y)) && l.Uses==1 && clobber(l) => (MSUB a x y) +(ADD a l:(MNEG x y)) && l.Uses==1 && clobber(l) => (MSUB a x y) +(SUB a l:(MNEG x y)) && l.Uses==1 && clobber(l) => (MADD a x y) -(ADD a l:(MULW x y)) && a.Type.Size() != 8 && l.Uses==1 && clobber(l) -> (MADDW a x y) -(SUB a l:(MULW x y)) && a.Type.Size() != 8 && l.Uses==1 && clobber(l) -> (MSUBW a x y) -(ADD a l:(MNEGW x y)) && a.Type.Size() != 8 && l.Uses==1 && clobber(l) -> (MSUBW a x y) -(SUB a l:(MNEGW x y)) && a.Type.Size() != 8 && l.Uses==1 && clobber(l) -> (MADDW a x y) +(ADD a l:(MULW x y)) && a.Type.Size() != 8 && l.Uses==1 && clobber(l) => (MADDW a x y) +(SUB a l:(MULW x y)) && a.Type.Size() != 8 && l.Uses==1 && clobber(l) => (MSUBW a x y) +(ADD a l:(MNEGW x y)) && a.Type.Size() != 8 && l.Uses==1 && clobber(l) => (MSUBW a x y) +(SUB a l:(MNEGW x y)) && a.Type.Size() != 8 && l.Uses==1 && clobber(l) => (MADDW a x y) // optimize ADCSflags, SBCSflags and friends -(ADCSflags x y (Select1 (ADDSconstflags [-1] (ADCzerocarry c)))) -> (ADCSflags x y c) -(ADCSflags x y (Select1 (ADDSconstflags [-1] (MOVDconst [0])))) -> (ADDSflags x y) -(SBCSflags x y (Select1 (NEGSflags (NEG (NGCzerocarry bo))))) -> (SBCSflags x y bo) -(SBCSflags x y (Select1 (NEGSflags (MOVDconst [0])))) -> (SUBSflags x y) +(ADCSflags x y (Select1 (ADDSconstflags [-1] (ADCzerocarry c)))) => (ADCSflags x y c) +(ADCSflags x y (Select1 (ADDSconstflags [-1] (MOVDconst [0])))) => (ADDSflags x y) +(SBCSflags x y (Select1 (NEGSflags (NEG (NGCzerocarry bo))))) => (SBCSflags x y bo) +(SBCSflags x y (Select1 (NEGSflags (MOVDconst [0])))) => (SUBSflags x y) // mul by constant -(MUL x (MOVDconst [-1])) -> (NEG x) -(MUL _ (MOVDconst [0])) -> (MOVDconst [0]) -(MUL x (MOVDconst [1])) -> x -(MUL x (MOVDconst [c])) && isPowerOfTwo(c) -> (SLLconst [log2(c)] x) -(MUL x (MOVDconst [c])) && isPowerOfTwo(c-1) && c >= 3 -> (ADDshiftLL x x [log2(c-1)]) -(MUL x (MOVDconst [c])) && isPowerOfTwo(c+1) && c >= 7 -> (ADDshiftLL (NEG x) x [log2(c+1)]) -(MUL x (MOVDconst [c])) && c%3 == 0 && isPowerOfTwo(c/3) -> (SLLconst [log2(c/3)] (ADDshiftLL x x [1])) -(MUL x (MOVDconst [c])) && c%5 == 0 && isPowerOfTwo(c/5) -> (SLLconst [log2(c/5)] (ADDshiftLL x x [2])) -(MUL x (MOVDconst [c])) && c%7 == 0 && isPowerOfTwo(c/7) -> (SLLconst [log2(c/7)] (ADDshiftLL (NEG x) x [3])) -(MUL x (MOVDconst [c])) && c%9 == 0 && isPowerOfTwo(c/9) -> (SLLconst [log2(c/9)] (ADDshiftLL x x [3])) - -(MULW x (MOVDconst [c])) && int32(c)==-1 -> (NEG x) -(MULW _ (MOVDconst [c])) && int32(c)==0 -> (MOVDconst [0]) -(MULW x (MOVDconst [c])) && int32(c)==1 -> x -(MULW x (MOVDconst [c])) && isPowerOfTwo(c) -> (SLLconst [log2(c)] x) -(MULW x (MOVDconst [c])) && isPowerOfTwo(c-1) && int32(c) >= 3 -> (ADDshiftLL x x [log2(c-1)]) -(MULW x (MOVDconst [c])) && isPowerOfTwo(c+1) 
&& int32(c) >= 7 -> (ADDshiftLL (NEG x) x [log2(c+1)]) -(MULW x (MOVDconst [c])) && c%3 == 0 && isPowerOfTwo(c/3) && is32Bit(c) -> (SLLconst [log2(c/3)] (ADDshiftLL x x [1])) -(MULW x (MOVDconst [c])) && c%5 == 0 && isPowerOfTwo(c/5) && is32Bit(c) -> (SLLconst [log2(c/5)] (ADDshiftLL x x [2])) -(MULW x (MOVDconst [c])) && c%7 == 0 && isPowerOfTwo(c/7) && is32Bit(c) -> (SLLconst [log2(c/7)] (ADDshiftLL (NEG x) x [3])) -(MULW x (MOVDconst [c])) && c%9 == 0 && isPowerOfTwo(c/9) && is32Bit(c) -> (SLLconst [log2(c/9)] (ADDshiftLL x x [3])) +(MUL x (MOVDconst [-1])) => (NEG x) +(MUL _ (MOVDconst [0])) => (MOVDconst [0]) +(MUL x (MOVDconst [1])) => x +(MUL x (MOVDconst [c])) && isPowerOfTwo(c) => (SLLconst [log2(c)] x) +(MUL x (MOVDconst [c])) && isPowerOfTwo(c-1) && c >= 3 => (ADDshiftLL x x [log2(c-1)]) +(MUL x (MOVDconst [c])) && isPowerOfTwo(c+1) && c >= 7 => (ADDshiftLL (NEG x) x [log2(c+1)]) +(MUL x (MOVDconst [c])) && c%3 == 0 && isPowerOfTwo(c/3) => (SLLconst [log2(c/3)] (ADDshiftLL x x [1])) +(MUL x (MOVDconst [c])) && c%5 == 0 && isPowerOfTwo(c/5) => (SLLconst [log2(c/5)] (ADDshiftLL x x [2])) +(MUL x (MOVDconst [c])) && c%7 == 0 && isPowerOfTwo(c/7) => (SLLconst [log2(c/7)] (ADDshiftLL (NEG x) x [3])) +(MUL x (MOVDconst [c])) && c%9 == 0 && isPowerOfTwo(c/9) => (SLLconst [log2(c/9)] (ADDshiftLL x x [3])) + +(MULW x (MOVDconst [c])) && int32(c)==-1 => (NEG x) +(MULW _ (MOVDconst [c])) && int32(c)==0 => (MOVDconst [0]) +(MULW x (MOVDconst [c])) && int32(c)==1 => x +(MULW x (MOVDconst [c])) && isPowerOfTwo(c) => (SLLconst [log2(c)] x) +(MULW x (MOVDconst [c])) && isPowerOfTwo(c-1) && int32(c) >= 3 => (ADDshiftLL x x [log2(c-1)]) +(MULW x (MOVDconst [c])) && isPowerOfTwo(c+1) && int32(c) >= 7 => (ADDshiftLL (NEG x) x [log2(c+1)]) +(MULW x (MOVDconst [c])) && c%3 == 0 && isPowerOfTwo(c/3) && is32Bit(c) => (SLLconst [log2(c/3)] (ADDshiftLL x x [1])) +(MULW x (MOVDconst [c])) && c%5 == 0 && isPowerOfTwo(c/5) && is32Bit(c) => (SLLconst [log2(c/5)] (ADDshiftLL x x [2])) +(MULW x (MOVDconst [c])) && c%7 == 0 && isPowerOfTwo(c/7) && is32Bit(c) => (SLLconst [log2(c/7)] (ADDshiftLL (NEG x) x [3])) +(MULW x (MOVDconst [c])) && c%9 == 0 && isPowerOfTwo(c/9) && is32Bit(c) => (SLLconst [log2(c/9)] (ADDshiftLL x x [3])) // mneg by constant -(MNEG x (MOVDconst [-1])) -> x -(MNEG _ (MOVDconst [0])) -> (MOVDconst [0]) -(MNEG x (MOVDconst [1])) -> (NEG x) -(MNEG x (MOVDconst [c])) && isPowerOfTwo(c) -> (NEG (SLLconst [log2(c)] x)) -(MNEG x (MOVDconst [c])) && isPowerOfTwo(c-1) && c >= 3 -> (NEG (ADDshiftLL x x [log2(c-1)])) -(MNEG x (MOVDconst [c])) && isPowerOfTwo(c+1) && c >= 7 -> (NEG (ADDshiftLL (NEG x) x [log2(c+1)])) -(MNEG x (MOVDconst [c])) && c%3 == 0 && isPowerOfTwo(c/3) -> (SLLconst [log2(c/3)] (SUBshiftLL x x [2])) -(MNEG x (MOVDconst [c])) && c%5 == 0 && isPowerOfTwo(c/5) -> (NEG (SLLconst [log2(c/5)] (ADDshiftLL x x [2]))) -(MNEG x (MOVDconst [c])) && c%7 == 0 && isPowerOfTwo(c/7) -> (SLLconst [log2(c/7)] (SUBshiftLL x x [3])) -(MNEG x (MOVDconst [c])) && c%9 == 0 && isPowerOfTwo(c/9) -> (NEG (SLLconst [log2(c/9)] (ADDshiftLL x x [3]))) - -(MNEGW x (MOVDconst [c])) && int32(c)==-1 -> x -(MNEGW _ (MOVDconst [c])) && int32(c)==0 -> (MOVDconst [0]) -(MNEGW x (MOVDconst [c])) && int32(c)==1 -> (NEG x) -(MNEGW x (MOVDconst [c])) && isPowerOfTwo(c) -> (NEG (SLLconst [log2(c)] x)) -(MNEGW x (MOVDconst [c])) && isPowerOfTwo(c-1) && int32(c) >= 3 -> (NEG (ADDshiftLL x x [log2(c-1)])) -(MNEGW x (MOVDconst [c])) && isPowerOfTwo(c+1) && int32(c) >= 7 -> (NEG (ADDshiftLL (NEG x) x [log2(c+1)])) -(MNEGW x 
(MOVDconst [c])) && c%3 == 0 && isPowerOfTwo(c/3) && is32Bit(c) -> (SLLconst [log2(c/3)] (SUBshiftLL x x [2])) -(MNEGW x (MOVDconst [c])) && c%5 == 0 && isPowerOfTwo(c/5) && is32Bit(c) -> (NEG (SLLconst [log2(c/5)] (ADDshiftLL x x [2]))) -(MNEGW x (MOVDconst [c])) && c%7 == 0 && isPowerOfTwo(c/7) && is32Bit(c) -> (SLLconst [log2(c/7)] (SUBshiftLL x x [3])) -(MNEGW x (MOVDconst [c])) && c%9 == 0 && isPowerOfTwo(c/9) && is32Bit(c) -> (NEG (SLLconst [log2(c/9)] (ADDshiftLL x x [3]))) - -(MADD a x (MOVDconst [-1])) -> (SUB a x) -(MADD a _ (MOVDconst [0])) -> a -(MADD a x (MOVDconst [1])) -> (ADD a x) -(MADD a x (MOVDconst [c])) && isPowerOfTwo(c) -> (ADDshiftLL a x [log2(c)]) -(MADD a x (MOVDconst [c])) && isPowerOfTwo(c-1) && c>=3 -> (ADD a (ADDshiftLL x x [log2(c-1)])) -(MADD a x (MOVDconst [c])) && isPowerOfTwo(c+1) && c>=7 -> (SUB a (SUBshiftLL x x [log2(c+1)])) -(MADD a x (MOVDconst [c])) && c%3 == 0 && isPowerOfTwo(c/3) -> (SUBshiftLL a (SUBshiftLL x x [2]) [log2(c/3)]) -(MADD a x (MOVDconst [c])) && c%5 == 0 && isPowerOfTwo(c/5) -> (ADDshiftLL a (ADDshiftLL x x [2]) [log2(c/5)]) -(MADD a x (MOVDconst [c])) && c%7 == 0 && isPowerOfTwo(c/7) -> (SUBshiftLL a (SUBshiftLL x x [3]) [log2(c/7)]) -(MADD a x (MOVDconst [c])) && c%9 == 0 && isPowerOfTwo(c/9) -> (ADDshiftLL a (ADDshiftLL x x [3]) [log2(c/9)]) - -(MADD a (MOVDconst [-1]) x) -> (SUB a x) -(MADD a (MOVDconst [0]) _) -> a -(MADD a (MOVDconst [1]) x) -> (ADD a x) -(MADD a (MOVDconst [c]) x) && isPowerOfTwo(c) -> (ADDshiftLL a x [log2(c)]) -(MADD a (MOVDconst [c]) x) && isPowerOfTwo(c-1) && c>=3 -> (ADD a (ADDshiftLL x x [log2(c-1)])) -(MADD a (MOVDconst [c]) x) && isPowerOfTwo(c+1) && c>=7 -> (SUB a (SUBshiftLL x x [log2(c+1)])) -(MADD a (MOVDconst [c]) x) && c%3 == 0 && isPowerOfTwo(c/3) -> (SUBshiftLL a (SUBshiftLL x x [2]) [log2(c/3)]) -(MADD a (MOVDconst [c]) x) && c%5 == 0 && isPowerOfTwo(c/5) -> (ADDshiftLL a (ADDshiftLL x x [2]) [log2(c/5)]) -(MADD a (MOVDconst [c]) x) && c%7 == 0 && isPowerOfTwo(c/7) -> (SUBshiftLL a (SUBshiftLL x x [3]) [log2(c/7)]) -(MADD a (MOVDconst [c]) x) && c%9 == 0 && isPowerOfTwo(c/9) -> (ADDshiftLL a (ADDshiftLL x x [3]) [log2(c/9)]) - -(MADDW a x (MOVDconst [c])) && int32(c)==-1 -> (SUB a x) -(MADDW a _ (MOVDconst [c])) && int32(c)==0 -> a -(MADDW a x (MOVDconst [c])) && int32(c)==1 -> (ADD a x) -(MADDW a x (MOVDconst [c])) && isPowerOfTwo(c) -> (ADDshiftLL a x [log2(c)]) -(MADDW a x (MOVDconst [c])) && isPowerOfTwo(c-1) && int32(c)>=3 -> (ADD a (ADDshiftLL x x [log2(c-1)])) -(MADDW a x (MOVDconst [c])) && isPowerOfTwo(c+1) && int32(c)>=7 -> (SUB a (SUBshiftLL x x [log2(c+1)])) -(MADDW a x (MOVDconst [c])) && c%3 == 0 && isPowerOfTwo(c/3) && is32Bit(c) -> (SUBshiftLL a (SUBshiftLL x x [2]) [log2(c/3)]) -(MADDW a x (MOVDconst [c])) && c%5 == 0 && isPowerOfTwo(c/5) && is32Bit(c) -> (ADDshiftLL a (ADDshiftLL x x [2]) [log2(c/5)]) -(MADDW a x (MOVDconst [c])) && c%7 == 0 && isPowerOfTwo(c/7) && is32Bit(c) -> (SUBshiftLL a (SUBshiftLL x x [3]) [log2(c/7)]) -(MADDW a x (MOVDconst [c])) && c%9 == 0 && isPowerOfTwo(c/9) && is32Bit(c) -> (ADDshiftLL a (ADDshiftLL x x [3]) [log2(c/9)]) - -(MADDW a (MOVDconst [c]) x) && int32(c)==-1 -> (SUB a x) -(MADDW a (MOVDconst [c]) _) && int32(c)==0 -> a -(MADDW a (MOVDconst [c]) x) && int32(c)==1 -> (ADD a x) -(MADDW a (MOVDconst [c]) x) && isPowerOfTwo(c) -> (ADDshiftLL a x [log2(c)]) -(MADDW a (MOVDconst [c]) x) && isPowerOfTwo(c-1) && int32(c)>=3 -> (ADD a (ADDshiftLL x x [log2(c-1)])) -(MADDW a (MOVDconst [c]) x) && isPowerOfTwo(c+1) && int32(c)>=7 -> (SUB a 
(SUBshiftLL x x [log2(c+1)])) -(MADDW a (MOVDconst [c]) x) && c%3 == 0 && isPowerOfTwo(c/3) && is32Bit(c) -> (SUBshiftLL a (SUBshiftLL x x [2]) [log2(c/3)]) -(MADDW a (MOVDconst [c]) x) && c%5 == 0 && isPowerOfTwo(c/5) && is32Bit(c) -> (ADDshiftLL a (ADDshiftLL x x [2]) [log2(c/5)]) -(MADDW a (MOVDconst [c]) x) && c%7 == 0 && isPowerOfTwo(c/7) && is32Bit(c) -> (SUBshiftLL a (SUBshiftLL x x [3]) [log2(c/7)]) -(MADDW a (MOVDconst [c]) x) && c%9 == 0 && isPowerOfTwo(c/9) && is32Bit(c) -> (ADDshiftLL a (ADDshiftLL x x [3]) [log2(c/9)]) - -(MSUB a x (MOVDconst [-1])) -> (ADD a x) -(MSUB a _ (MOVDconst [0])) -> a -(MSUB a x (MOVDconst [1])) -> (SUB a x) -(MSUB a x (MOVDconst [c])) && isPowerOfTwo(c) -> (SUBshiftLL a x [log2(c)]) -(MSUB a x (MOVDconst [c])) && isPowerOfTwo(c-1) && c>=3 -> (SUB a (ADDshiftLL x x [log2(c-1)])) -(MSUB a x (MOVDconst [c])) && isPowerOfTwo(c+1) && c>=7 -> (ADD a (SUBshiftLL x x [log2(c+1)])) -(MSUB a x (MOVDconst [c])) && c%3 == 0 && isPowerOfTwo(c/3) -> (ADDshiftLL a (SUBshiftLL x x [2]) [log2(c/3)]) -(MSUB a x (MOVDconst [c])) && c%5 == 0 && isPowerOfTwo(c/5) -> (SUBshiftLL a (ADDshiftLL x x [2]) [log2(c/5)]) -(MSUB a x (MOVDconst [c])) && c%7 == 0 && isPowerOfTwo(c/7) -> (ADDshiftLL a (SUBshiftLL x x [3]) [log2(c/7)]) -(MSUB a x (MOVDconst [c])) && c%9 == 0 && isPowerOfTwo(c/9) -> (SUBshiftLL a (ADDshiftLL x x [3]) [log2(c/9)]) - -(MSUB a (MOVDconst [-1]) x) -> (ADD a x) -(MSUB a (MOVDconst [0]) _) -> a -(MSUB a (MOVDconst [1]) x) -> (SUB a x) -(MSUB a (MOVDconst [c]) x) && isPowerOfTwo(c) -> (SUBshiftLL a x [log2(c)]) -(MSUB a (MOVDconst [c]) x) && isPowerOfTwo(c-1) && c>=3 -> (SUB a (ADDshiftLL x x [log2(c-1)])) -(MSUB a (MOVDconst [c]) x) && isPowerOfTwo(c+1) && c>=7 -> (ADD a (SUBshiftLL x x [log2(c+1)])) -(MSUB a (MOVDconst [c]) x) && c%3 == 0 && isPowerOfTwo(c/3) -> (ADDshiftLL a (SUBshiftLL x x [2]) [log2(c/3)]) -(MSUB a (MOVDconst [c]) x) && c%5 == 0 && isPowerOfTwo(c/5) -> (SUBshiftLL a (ADDshiftLL x x [2]) [log2(c/5)]) -(MSUB a (MOVDconst [c]) x) && c%7 == 0 && isPowerOfTwo(c/7) -> (ADDshiftLL a (SUBshiftLL x x [3]) [log2(c/7)]) -(MSUB a (MOVDconst [c]) x) && c%9 == 0 && isPowerOfTwo(c/9) -> (SUBshiftLL a (ADDshiftLL x x [3]) [log2(c/9)]) - -(MSUBW a x (MOVDconst [c])) && int32(c)==-1 -> (ADD a x) -(MSUBW a _ (MOVDconst [c])) && int32(c)==0 -> a -(MSUBW a x (MOVDconst [c])) && int32(c)==1 -> (SUB a x) -(MSUBW a x (MOVDconst [c])) && isPowerOfTwo(c) -> (SUBshiftLL a x [log2(c)]) -(MSUBW a x (MOVDconst [c])) && isPowerOfTwo(c-1) && int32(c)>=3 -> (SUB a (ADDshiftLL x x [log2(c-1)])) -(MSUBW a x (MOVDconst [c])) && isPowerOfTwo(c+1) && int32(c)>=7 -> (ADD a (SUBshiftLL x x [log2(c+1)])) -(MSUBW a x (MOVDconst [c])) && c%3 == 0 && isPowerOfTwo(c/3) && is32Bit(c) -> (ADDshiftLL a (SUBshiftLL x x [2]) [log2(c/3)]) -(MSUBW a x (MOVDconst [c])) && c%5 == 0 && isPowerOfTwo(c/5) && is32Bit(c) -> (SUBshiftLL a (ADDshiftLL x x [2]) [log2(c/5)]) -(MSUBW a x (MOVDconst [c])) && c%7 == 0 && isPowerOfTwo(c/7) && is32Bit(c) -> (ADDshiftLL a (SUBshiftLL x x [3]) [log2(c/7)]) -(MSUBW a x (MOVDconst [c])) && c%9 == 0 && isPowerOfTwo(c/9) && is32Bit(c) -> (SUBshiftLL a (ADDshiftLL x x [3]) [log2(c/9)]) - -(MSUBW a (MOVDconst [c]) x) && int32(c)==-1 -> (ADD a x) -(MSUBW a (MOVDconst [c]) _) && int32(c)==0 -> a -(MSUBW a (MOVDconst [c]) x) && int32(c)==1 -> (SUB a x) -(MSUBW a (MOVDconst [c]) x) && isPowerOfTwo(c) -> (SUBshiftLL a x [log2(c)]) -(MSUBW a (MOVDconst [c]) x) && isPowerOfTwo(c-1) && int32(c)>=3 -> (SUB a (ADDshiftLL x x [log2(c-1)])) -(MSUBW a (MOVDconst [c]) x) && 
isPowerOfTwo(c+1) && int32(c)>=7 -> (ADD a (SUBshiftLL x x [log2(c+1)])) -(MSUBW a (MOVDconst [c]) x) && c%3 == 0 && isPowerOfTwo(c/3) && is32Bit(c) -> (ADDshiftLL a (SUBshiftLL x x [2]) [log2(c/3)]) -(MSUBW a (MOVDconst [c]) x) && c%5 == 0 && isPowerOfTwo(c/5) && is32Bit(c) -> (SUBshiftLL a (ADDshiftLL x x [2]) [log2(c/5)]) -(MSUBW a (MOVDconst [c]) x) && c%7 == 0 && isPowerOfTwo(c/7) && is32Bit(c) -> (ADDshiftLL a (SUBshiftLL x x [3]) [log2(c/7)]) -(MSUBW a (MOVDconst [c]) x) && c%9 == 0 && isPowerOfTwo(c/9) && is32Bit(c) -> (SUBshiftLL a (ADDshiftLL x x [3]) [log2(c/9)]) +(MNEG x (MOVDconst [-1])) => x +(MNEG _ (MOVDconst [0])) => (MOVDconst [0]) +(MNEG x (MOVDconst [1])) => (NEG x) +(MNEG x (MOVDconst [c])) && isPowerOfTwo(c) => (NEG (SLLconst [log2(c)] x)) +(MNEG x (MOVDconst [c])) && isPowerOfTwo(c-1) && c >= 3 => (NEG (ADDshiftLL x x [log2(c-1)])) +(MNEG x (MOVDconst [c])) && isPowerOfTwo(c+1) && c >= 7 => (NEG (ADDshiftLL (NEG x) x [log2(c+1)])) +(MNEG x (MOVDconst [c])) && c%3 == 0 && isPowerOfTwo(c/3) => (SLLconst [log2(c/3)] (SUBshiftLL x x [2])) +(MNEG x (MOVDconst [c])) && c%5 == 0 && isPowerOfTwo(c/5) => (NEG (SLLconst [log2(c/5)] (ADDshiftLL x x [2]))) +(MNEG x (MOVDconst [c])) && c%7 == 0 && isPowerOfTwo(c/7) => (SLLconst [log2(c/7)] (SUBshiftLL x x [3])) +(MNEG x (MOVDconst [c])) && c%9 == 0 && isPowerOfTwo(c/9) => (NEG (SLLconst [log2(c/9)] (ADDshiftLL x x [3]))) + +(MNEGW x (MOVDconst [c])) && int32(c)==-1 => x +(MNEGW _ (MOVDconst [c])) && int32(c)==0 => (MOVDconst [0]) +(MNEGW x (MOVDconst [c])) && int32(c)==1 => (NEG x) +(MNEGW x (MOVDconst [c])) && isPowerOfTwo(c) => (NEG (SLLconst [log2(c)] x)) +(MNEGW x (MOVDconst [c])) && isPowerOfTwo(c-1) && int32(c) >= 3 => (NEG (ADDshiftLL x x [log2(c-1)])) +(MNEGW x (MOVDconst [c])) && isPowerOfTwo(c+1) && int32(c) >= 7 => (NEG (ADDshiftLL (NEG x) x [log2(c+1)])) +(MNEGW x (MOVDconst [c])) && c%3 == 0 && isPowerOfTwo(c/3) && is32Bit(c) => (SLLconst [log2(c/3)] (SUBshiftLL x x [2])) +(MNEGW x (MOVDconst [c])) && c%5 == 0 && isPowerOfTwo(c/5) && is32Bit(c) => (NEG (SLLconst [log2(c/5)] (ADDshiftLL x x [2]))) +(MNEGW x (MOVDconst [c])) && c%7 == 0 && isPowerOfTwo(c/7) && is32Bit(c) => (SLLconst [log2(c/7)] (SUBshiftLL x x [3])) +(MNEGW x (MOVDconst [c])) && c%9 == 0 && isPowerOfTwo(c/9) && is32Bit(c) => (NEG (SLLconst [log2(c/9)] (ADDshiftLL x x [3]))) + +(MADD a x (MOVDconst [-1])) => (SUB a x) +(MADD a _ (MOVDconst [0])) => a +(MADD a x (MOVDconst [1])) => (ADD a x) +(MADD a x (MOVDconst [c])) && isPowerOfTwo(c) => (ADDshiftLL a x [log2(c)]) +(MADD a x (MOVDconst [c])) && isPowerOfTwo(c-1) && c>=3 => (ADD a (ADDshiftLL x x [log2(c-1)])) +(MADD a x (MOVDconst [c])) && isPowerOfTwo(c+1) && c>=7 => (SUB a (SUBshiftLL x x [log2(c+1)])) +(MADD a x (MOVDconst [c])) && c%3 == 0 && isPowerOfTwo(c/3) => (SUBshiftLL a (SUBshiftLL x x [2]) [log2(c/3)]) +(MADD a x (MOVDconst [c])) && c%5 == 0 && isPowerOfTwo(c/5) => (ADDshiftLL a (ADDshiftLL x x [2]) [log2(c/5)]) +(MADD a x (MOVDconst [c])) && c%7 == 0 && isPowerOfTwo(c/7) => (SUBshiftLL a (SUBshiftLL x x [3]) [log2(c/7)]) +(MADD a x (MOVDconst [c])) && c%9 == 0 && isPowerOfTwo(c/9) => (ADDshiftLL a (ADDshiftLL x x [3]) [log2(c/9)]) + +(MADD a (MOVDconst [-1]) x) => (SUB a x) +(MADD a (MOVDconst [0]) _) => a +(MADD a (MOVDconst [1]) x) => (ADD a x) +(MADD a (MOVDconst [c]) x) && isPowerOfTwo(c) => (ADDshiftLL a x [log2(c)]) +(MADD a (MOVDconst [c]) x) && isPowerOfTwo(c-1) && c>=3 => (ADD a (ADDshiftLL x x [log2(c-1)])) +(MADD a (MOVDconst [c]) x) && isPowerOfTwo(c+1) && c>=7 => (SUB a 
(SUBshiftLL x x [log2(c+1)])) +(MADD a (MOVDconst [c]) x) && c%3 == 0 && isPowerOfTwo(c/3) => (SUBshiftLL a (SUBshiftLL x x [2]) [log2(c/3)]) +(MADD a (MOVDconst [c]) x) && c%5 == 0 && isPowerOfTwo(c/5) => (ADDshiftLL a (ADDshiftLL x x [2]) [log2(c/5)]) +(MADD a (MOVDconst [c]) x) && c%7 == 0 && isPowerOfTwo(c/7) => (SUBshiftLL a (SUBshiftLL x x [3]) [log2(c/7)]) +(MADD a (MOVDconst [c]) x) && c%9 == 0 && isPowerOfTwo(c/9) => (ADDshiftLL a (ADDshiftLL x x [3]) [log2(c/9)]) + +(MADDW a x (MOVDconst [c])) && int32(c)==-1 => (SUB a x) +(MADDW a _ (MOVDconst [c])) && int32(c)==0 => a +(MADDW a x (MOVDconst [c])) && int32(c)==1 => (ADD a x) +(MADDW a x (MOVDconst [c])) && isPowerOfTwo(c) => (ADDshiftLL a x [log2(c)]) +(MADDW a x (MOVDconst [c])) && isPowerOfTwo(c-1) && int32(c)>=3 => (ADD a (ADDshiftLL x x [log2(c-1)])) +(MADDW a x (MOVDconst [c])) && isPowerOfTwo(c+1) && int32(c)>=7 => (SUB a (SUBshiftLL x x [log2(c+1)])) +(MADDW a x (MOVDconst [c])) && c%3 == 0 && isPowerOfTwo(c/3) && is32Bit(c) => (SUBshiftLL a (SUBshiftLL x x [2]) [log2(c/3)]) +(MADDW a x (MOVDconst [c])) && c%5 == 0 && isPowerOfTwo(c/5) && is32Bit(c) => (ADDshiftLL a (ADDshiftLL x x [2]) [log2(c/5)]) +(MADDW a x (MOVDconst [c])) && c%7 == 0 && isPowerOfTwo(c/7) && is32Bit(c) => (SUBshiftLL a (SUBshiftLL x x [3]) [log2(c/7)]) +(MADDW a x (MOVDconst [c])) && c%9 == 0 && isPowerOfTwo(c/9) && is32Bit(c) => (ADDshiftLL a (ADDshiftLL x x [3]) [log2(c/9)]) + +(MADDW a (MOVDconst [c]) x) && int32(c)==-1 => (SUB a x) +(MADDW a (MOVDconst [c]) _) && int32(c)==0 => a +(MADDW a (MOVDconst [c]) x) && int32(c)==1 => (ADD a x) +(MADDW a (MOVDconst [c]) x) && isPowerOfTwo(c) => (ADDshiftLL a x [log2(c)]) +(MADDW a (MOVDconst [c]) x) && isPowerOfTwo(c-1) && int32(c)>=3 => (ADD a (ADDshiftLL x x [log2(c-1)])) +(MADDW a (MOVDconst [c]) x) && isPowerOfTwo(c+1) && int32(c)>=7 => (SUB a (SUBshiftLL x x [log2(c+1)])) +(MADDW a (MOVDconst [c]) x) && c%3 == 0 && isPowerOfTwo(c/3) && is32Bit(c) => (SUBshiftLL a (SUBshiftLL x x [2]) [log2(c/3)]) +(MADDW a (MOVDconst [c]) x) && c%5 == 0 && isPowerOfTwo(c/5) && is32Bit(c) => (ADDshiftLL a (ADDshiftLL x x [2]) [log2(c/5)]) +(MADDW a (MOVDconst [c]) x) && c%7 == 0 && isPowerOfTwo(c/7) && is32Bit(c) => (SUBshiftLL a (SUBshiftLL x x [3]) [log2(c/7)]) +(MADDW a (MOVDconst [c]) x) && c%9 == 0 && isPowerOfTwo(c/9) && is32Bit(c) => (ADDshiftLL a (ADDshiftLL x x [3]) [log2(c/9)]) + +(MSUB a x (MOVDconst [-1])) => (ADD a x) +(MSUB a _ (MOVDconst [0])) => a +(MSUB a x (MOVDconst [1])) => (SUB a x) +(MSUB a x (MOVDconst [c])) && isPowerOfTwo(c) => (SUBshiftLL a x [log2(c)]) +(MSUB a x (MOVDconst [c])) && isPowerOfTwo(c-1) && c>=3 => (SUB a (ADDshiftLL x x [log2(c-1)])) +(MSUB a x (MOVDconst [c])) && isPowerOfTwo(c+1) && c>=7 => (ADD a (SUBshiftLL x x [log2(c+1)])) +(MSUB a x (MOVDconst [c])) && c%3 == 0 && isPowerOfTwo(c/3) => (ADDshiftLL a (SUBshiftLL x x [2]) [log2(c/3)]) +(MSUB a x (MOVDconst [c])) && c%5 == 0 && isPowerOfTwo(c/5) => (SUBshiftLL a (ADDshiftLL x x [2]) [log2(c/5)]) +(MSUB a x (MOVDconst [c])) && c%7 == 0 && isPowerOfTwo(c/7) => (ADDshiftLL a (SUBshiftLL x x [3]) [log2(c/7)]) +(MSUB a x (MOVDconst [c])) && c%9 == 0 && isPowerOfTwo(c/9) => (SUBshiftLL a (ADDshiftLL x x [3]) [log2(c/9)]) + +(MSUB a (MOVDconst [-1]) x) => (ADD a x) +(MSUB a (MOVDconst [0]) _) => a +(MSUB a (MOVDconst [1]) x) => (SUB a x) +(MSUB a (MOVDconst [c]) x) && isPowerOfTwo(c) => (SUBshiftLL a x [log2(c)]) +(MSUB a (MOVDconst [c]) x) && isPowerOfTwo(c-1) && c>=3 => (SUB a (ADDshiftLL x x [log2(c-1)])) +(MSUB a (MOVDconst 
[c]) x) && isPowerOfTwo(c+1) && c>=7 => (ADD a (SUBshiftLL x x [log2(c+1)])) +(MSUB a (MOVDconst [c]) x) && c%3 == 0 && isPowerOfTwo(c/3) => (ADDshiftLL a (SUBshiftLL x x [2]) [log2(c/3)]) +(MSUB a (MOVDconst [c]) x) && c%5 == 0 && isPowerOfTwo(c/5) => (SUBshiftLL a (ADDshiftLL x x [2]) [log2(c/5)]) +(MSUB a (MOVDconst [c]) x) && c%7 == 0 && isPowerOfTwo(c/7) => (ADDshiftLL a (SUBshiftLL x x [3]) [log2(c/7)]) +(MSUB a (MOVDconst [c]) x) && c%9 == 0 && isPowerOfTwo(c/9) => (SUBshiftLL a (ADDshiftLL x x [3]) [log2(c/9)]) + +(MSUBW a x (MOVDconst [c])) && int32(c)==-1 => (ADD a x) +(MSUBW a _ (MOVDconst [c])) && int32(c)==0 => a +(MSUBW a x (MOVDconst [c])) && int32(c)==1 => (SUB a x) +(MSUBW a x (MOVDconst [c])) && isPowerOfTwo(c) => (SUBshiftLL a x [log2(c)]) +(MSUBW a x (MOVDconst [c])) && isPowerOfTwo(c-1) && int32(c)>=3 => (SUB a (ADDshiftLL x x [log2(c-1)])) +(MSUBW a x (MOVDconst [c])) && isPowerOfTwo(c+1) && int32(c)>=7 => (ADD a (SUBshiftLL x x [log2(c+1)])) +(MSUBW a x (MOVDconst [c])) && c%3 == 0 && isPowerOfTwo(c/3) && is32Bit(c) => (ADDshiftLL a (SUBshiftLL x x [2]) [log2(c/3)]) +(MSUBW a x (MOVDconst [c])) && c%5 == 0 && isPowerOfTwo(c/5) && is32Bit(c) => (SUBshiftLL a (ADDshiftLL x x [2]) [log2(c/5)]) +(MSUBW a x (MOVDconst [c])) && c%7 == 0 && isPowerOfTwo(c/7) && is32Bit(c) => (ADDshiftLL a (SUBshiftLL x x [3]) [log2(c/7)]) +(MSUBW a x (MOVDconst [c])) && c%9 == 0 && isPowerOfTwo(c/9) && is32Bit(c) => (SUBshiftLL a (ADDshiftLL x x [3]) [log2(c/9)]) + +(MSUBW a (MOVDconst [c]) x) && int32(c)==-1 => (ADD a x) +(MSUBW a (MOVDconst [c]) _) && int32(c)==0 => a +(MSUBW a (MOVDconst [c]) x) && int32(c)==1 => (SUB a x) +(MSUBW a (MOVDconst [c]) x) && isPowerOfTwo(c) => (SUBshiftLL a x [log2(c)]) +(MSUBW a (MOVDconst [c]) x) && isPowerOfTwo(c-1) && int32(c)>=3 => (SUB a (ADDshiftLL x x [log2(c-1)])) +(MSUBW a (MOVDconst [c]) x) && isPowerOfTwo(c+1) && int32(c)>=7 => (ADD a (SUBshiftLL x x [log2(c+1)])) +(MSUBW a (MOVDconst [c]) x) && c%3 == 0 && isPowerOfTwo(c/3) && is32Bit(c) => (ADDshiftLL a (SUBshiftLL x x [2]) [log2(c/3)]) +(MSUBW a (MOVDconst [c]) x) && c%5 == 0 && isPowerOfTwo(c/5) && is32Bit(c) => (SUBshiftLL a (ADDshiftLL x x [2]) [log2(c/5)]) +(MSUBW a (MOVDconst [c]) x) && c%7 == 0 && isPowerOfTwo(c/7) && is32Bit(c) => (ADDshiftLL a (SUBshiftLL x x [3]) [log2(c/7)]) +(MSUBW a (MOVDconst [c]) x) && c%9 == 0 && isPowerOfTwo(c/9) && is32Bit(c) => (SUBshiftLL a (ADDshiftLL x x [3]) [log2(c/9)]) // div by constant -(UDIV x (MOVDconst [1])) -> x -(UDIV x (MOVDconst [c])) && isPowerOfTwo(c) -> (SRLconst [log2(c)] x) -(UDIVW x (MOVDconst [c])) && uint32(c)==1 -> x -(UDIVW x (MOVDconst [c])) && isPowerOfTwo(c) && is32Bit(c) -> (SRLconst [log2(c)] x) -(UMOD _ (MOVDconst [1])) -> (MOVDconst [0]) -(UMOD x (MOVDconst [c])) && isPowerOfTwo(c) -> (ANDconst [c-1] x) -(UMODW _ (MOVDconst [c])) && uint32(c)==1 -> (MOVDconst [0]) -(UMODW x (MOVDconst [c])) && isPowerOfTwo(c) && is32Bit(c) -> (ANDconst [c-1] x) +(UDIV x (MOVDconst [1])) => x +(UDIV x (MOVDconst [c])) && isPowerOfTwo(c) => (SRLconst [log2(c)] x) +(UDIVW x (MOVDconst [c])) && uint32(c)==1 => x +(UDIVW x (MOVDconst [c])) && isPowerOfTwo(c) && is32Bit(c) => (SRLconst [log2(c)] x) +(UMOD _ (MOVDconst [1])) => (MOVDconst [0]) +(UMOD x (MOVDconst [c])) && isPowerOfTwo(c) => (ANDconst [c-1] x) +(UMODW _ (MOVDconst [c])) && uint32(c)==1 => (MOVDconst [0]) +(UMODW x (MOVDconst [c])) && isPowerOfTwo(c) && is32Bit(c) => (ANDconst [c-1] x) // generic simplifications -(ADD x (NEG y)) -> (SUB x y) -(SUB x x) -> (MOVDconst [0]) -(AND x x) -> x 
-(OR x x) -> x
-(XOR x x) -> (MOVDconst [0])
-(BIC x x) -> (MOVDconst [0])
-(EON x x) -> (MOVDconst [-1])
-(ORN x x) -> (MOVDconst [-1])
-(AND x (MVN y)) -> (BIC x y)
-(XOR x (MVN y)) -> (EON x y)
-(OR x (MVN y)) -> (ORN x y)
-(MVN (XOR x y)) -> (EON x y)
+(ADD x (NEG y)) => (SUB x y)
+(SUB x x) => (MOVDconst [0])
+(AND x x) => x
+(OR x x) => x
+(XOR x x) => (MOVDconst [0])
+(BIC x x) => (MOVDconst [0])
+(EON x x) => (MOVDconst [-1])
+(ORN x x) => (MOVDconst [-1])
+(AND x (MVN y)) => (BIC x y)
+(XOR x (MVN y)) => (EON x y)
+(OR x (MVN y)) => (ORN x y)
+(MVN (XOR x y)) => (EON x y)
 (CSEL [cc] x (MOVDconst [0]) flag) => (CSEL0 [cc] x flag)
 (CSEL [cc] (MOVDconst [0]) y flag) => (CSEL0 [arm64Negate(cc)] y flag)
-(SUB x (SUB y z)) -> (SUB (ADD x z) y)
-(SUB (SUB x y) z) -> (SUB x (ADD y z))
+(SUB x (SUB y z)) => (SUB (ADD x z) y)
+(SUB (SUB x y) z) => (SUB x (ADD y z))
 // remove redundant *const ops
-(ADDconst [0] x) -> x
-(SUBconst [0] x) -> x
-(ANDconst [0] _) -> (MOVDconst [0])
-(ANDconst [-1] x) -> x
-(ORconst [0] x) -> x
-(ORconst [-1] _) -> (MOVDconst [-1])
-(XORconst [0] x) -> x
-(XORconst [-1] x) -> (MVN x)
+(ADDconst [0] x) => x
+(SUBconst [0] x) => x
+(ANDconst [0] _) => (MOVDconst [0])
+(ANDconst [-1] x) => x
+(ORconst [0] x) => x
+(ORconst [-1] _) => (MOVDconst [-1])
+(XORconst [0] x) => x
+(XORconst [-1] x) => (MVN x)
 // generic constant folding
-(ADDconst [c] (MOVDconst [d])) -> (MOVDconst [c+d])
-(ADDconst [c] (ADDconst [d] x)) -> (ADDconst [c+d] x)
-(ADDconst [c] (SUBconst [d] x)) -> (ADDconst [c-d] x)
-(SUBconst [c] (MOVDconst [d])) -> (MOVDconst [d-c])
-(SUBconst [c] (SUBconst [d] x)) -> (ADDconst [-c-d] x)
-(SUBconst [c] (ADDconst [d] x)) -> (ADDconst [-c+d] x)
-(SLLconst [c] (MOVDconst [d])) -> (MOVDconst [d<<uint64(c)])
-(SRLconst [c] (MOVDconst [d])) -> (MOVDconst [int64(uint64(d)>>uint64(c))])
-(SRAconst [c] (MOVDconst [d])) -> (MOVDconst [d>>uint64(c)])
-(MUL (MOVDconst [c]) (MOVDconst [d])) -> (MOVDconst [c*d])
-(MULW (MOVDconst [c]) (MOVDconst [d])) -> (MOVDconst [int64(int32(c)*int32(d))])
-(MNEG (MOVDconst [c]) (MOVDconst [d])) -> (MOVDconst [-c*d])
-(MNEGW (MOVDconst [c]) (MOVDconst [d])) -> (MOVDconst [-int64(int32(c)*int32(d))])
-(MADD (MOVDconst [c]) x y) -> (ADDconst [c] (MUL x y))
-(MADDW (MOVDconst [c]) x y) -> (ADDconst [c] (MULW x y))
-(MSUB (MOVDconst [c]) x y) -> (ADDconst [c] (MNEG x y))
-(MSUBW (MOVDconst [c]) x y) -> (ADDconst [c] (MNEGW x y))
-(MADD a (MOVDconst [c]) (MOVDconst [d])) -> (ADDconst [c*d] a)
-(MADDW a (MOVDconst [c]) (MOVDconst [d])) -> (ADDconst [int64(int32(c)*int32(d))] a)
-(MSUB a (MOVDconst [c]) (MOVDconst [d])) -> (SUBconst [c*d] a)
-(MSUBW a (MOVDconst [c]) (MOVDconst [d])) -> (SUBconst [int64(int32(c)*int32(d))] a)
-(DIV (MOVDconst [c]) (MOVDconst [d])) -> (MOVDconst [c/d])
-(UDIV (MOVDconst [c]) (MOVDconst [d])) -> (MOVDconst [int64(uint64(c)/uint64(d))])
-(DIVW (MOVDconst [c]) (MOVDconst [d])) -> (MOVDconst [int64(int32(c)/int32(d))])
-(UDIVW (MOVDconst [c]) (MOVDconst [d])) -> (MOVDconst [int64(uint32(c)/uint32(d))])
-(MOD (MOVDconst [c]) (MOVDconst [d])) -> (MOVDconst [c%d])
-(UMOD (MOVDconst [c]) (MOVDconst [d])) -> (MOVDconst [int64(uint64(c)%uint64(d))])
-(MODW (MOVDconst [c]) (MOVDconst [d])) -> (MOVDconst [int64(int32(c)%int32(d))])
-(UMODW (MOVDconst [c]) (MOVDconst [d])) -> (MOVDconst [int64(uint32(c)%uint32(d))])
-(ANDconst [c] (MOVDconst [d])) -> (MOVDconst [c&d])
-(ANDconst [c] (ANDconst [d] x)) -> (ANDconst [c&d] x)
-(ANDconst [c] (MOVWUreg x)) -> (ANDconst [c&(1<<32-1)] x)
-(ANDconst [c] (MOVHUreg x)) -> (ANDconst [c&(1<<16-1)] x)
-(ANDconst [c] (MOVBUreg x)) -> (ANDconst [c&(1<<8-1)] x)
-(MOVWUreg (ANDconst [c] x)) -> (ANDconst [c&(1<<32-1)] x)
-(MOVHUreg (ANDconst [c] x)) -> (ANDconst [c&(1<<16-1)] x)
-(MOVBUreg (ANDconst [c] x)) -> (ANDconst [c&(1<<8-1)] x)
-(ORconst [c] (MOVDconst [d])) -> (MOVDconst [c|d])
-(ORconst [c] (ORconst [d] x)) -> (ORconst [c|d] x)
-(XORconst [c] (MOVDconst [d])) -> (MOVDconst [c^d])
-(XORconst [c] (XORconst [d] x)) -> (XORconst [c^d] x)
-(MVN (MOVDconst [c])) -> (MOVDconst [^c])
-(NEG (MOVDconst [c])) -> (MOVDconst [-c])
-(MOVBreg (MOVDconst [c])) -> (MOVDconst [int64(int8(c))])
-(MOVBUreg (MOVDconst [c])) -> (MOVDconst [int64(uint8(c))])
-(MOVHreg (MOVDconst [c])) -> (MOVDconst [int64(int16(c))])
-(MOVHUreg (MOVDconst [c])) -> (MOVDconst [int64(uint16(c))])
-(MOVWreg (MOVDconst [c])) -> (MOVDconst [int64(int32(c))])
-(MOVWUreg (MOVDconst [c])) -> (MOVDconst [int64(uint32(c))])
-(MOVDreg (MOVDconst [c])) -> (MOVDconst [c])
+(ADDconst [c] (MOVDconst [d])) => (MOVDconst [c+d])
+(ADDconst [c] (ADDconst [d] x)) => (ADDconst [c+d] x)
+(ADDconst [c] (SUBconst [d] x)) => (ADDconst [c-d] x)
+(SUBconst [c] (MOVDconst [d])) => (MOVDconst [d-c])
+(SUBconst [c] (SUBconst [d] x)) => (ADDconst [-c-d] x)
+(SUBconst [c] (ADDconst [d] x)) => (ADDconst [-c+d] x)
+(SLLconst [c] (MOVDconst [d])) => (MOVDconst [d<<uint64(c)])
+(SRLconst [c] (MOVDconst [d])) => (MOVDconst [int64(uint64(d)>>uint64(c))])
+(SRAconst [c] (MOVDconst [d])) => (MOVDconst [d>>uint64(c)])
+(MUL (MOVDconst [c]) (MOVDconst [d])) => (MOVDconst [c*d])
+(MULW (MOVDconst [c]) (MOVDconst [d])) => (MOVDconst [int64(int32(c)*int32(d))])
+(MNEG (MOVDconst [c]) (MOVDconst [d])) => (MOVDconst [-c*d])
+(MNEGW (MOVDconst [c]) (MOVDconst [d])) => (MOVDconst [-int64(int32(c)*int32(d))])
+(MADD (MOVDconst [c]) x y) => (ADDconst [c] (MUL x y))
+(MADDW (MOVDconst [c]) x y) => (ADDconst [c] (MULW x y))
+(MSUB (MOVDconst [c]) x y) => (ADDconst [c] (MNEG x y))
+(MSUBW (MOVDconst [c]) x y) => (ADDconst [c] (MNEGW x y))
+(MADD a (MOVDconst [c]) (MOVDconst [d])) => (ADDconst [c*d] a)
+(MADDW a (MOVDconst [c]) (MOVDconst [d])) => (ADDconst [int64(int32(c)*int32(d))] a)
+(MSUB a (MOVDconst [c]) (MOVDconst [d])) => (SUBconst [c*d] a)
+(MSUBW a (MOVDconst [c]) (MOVDconst [d])) => (SUBconst [int64(int32(c)*int32(d))] a)
+(DIV (MOVDconst [c]) (MOVDconst [d])) => (MOVDconst [c/d])
+(UDIV (MOVDconst [c]) (MOVDconst [d])) => (MOVDconst [int64(uint64(c)/uint64(d))])
+(DIVW (MOVDconst [c]) (MOVDconst [d])) => (MOVDconst [int64(int32(c)/int32(d))])
+(UDIVW (MOVDconst [c]) (MOVDconst [d])) => (MOVDconst [int64(uint32(c)/uint32(d))])
+(MOD (MOVDconst [c]) (MOVDconst [d])) => (MOVDconst [c%d])
+(UMOD (MOVDconst [c]) (MOVDconst [d])) => (MOVDconst [int64(uint64(c)%uint64(d))])
+(MODW (MOVDconst [c]) (MOVDconst [d])) => (MOVDconst [int64(int32(c)%int32(d))])
+(UMODW (MOVDconst [c]) (MOVDconst [d])) => (MOVDconst [int64(uint32(c)%uint32(d))])
+(ANDconst [c] (MOVDconst [d])) => (MOVDconst [c&d])
+(ANDconst [c] (ANDconst [d] x)) => (ANDconst [c&d] x)
+(ANDconst [c] (MOVWUreg x)) => (ANDconst [c&(1<<32-1)] x)
+(ANDconst [c] (MOVHUreg x)) => (ANDconst [c&(1<<16-1)] x)
+(ANDconst [c] (MOVBUreg x)) => (ANDconst [c&(1<<8-1)] x)
+(MOVWUreg (ANDconst [c] x)) => (ANDconst [c&(1<<32-1)] x)
+(MOVHUreg (ANDconst [c] x)) => (ANDconst [c&(1<<16-1)] x)
+(MOVBUreg (ANDconst [c] x)) => (ANDconst [c&(1<<8-1)] x)
+(ORconst [c] (MOVDconst [d])) => (MOVDconst [c|d])
+(ORconst [c] (ORconst [d] x)) => (ORconst [c|d] x)
+(XORconst [c] (MOVDconst [d])) => (MOVDconst [c^d])
+(XORconst [c] (XORconst [d] x)) => (XORconst [c^d] x)
+(MVN (MOVDconst [c])) => (MOVDconst [^c])
+(NEG (MOVDconst [c])) => (MOVDconst [-c]) +(MOVBreg (MOVDconst [c])) => (MOVDconst [int64(int8(c))]) +(MOVBUreg (MOVDconst [c])) => (MOVDconst [int64(uint8(c))]) +(MOVHreg (MOVDconst [c])) => (MOVDconst [int64(int16(c))]) +(MOVHUreg (MOVDconst [c])) => (MOVDconst [int64(uint16(c))]) +(MOVWreg (MOVDconst [c])) => (MOVDconst [int64(int32(c))]) +(MOVWUreg (MOVDconst [c])) => (MOVDconst [int64(uint32(c))]) +(MOVDreg (MOVDconst [c])) => (MOVDconst [c]) // constant comparisons (CMPconst (MOVDconst [x]) [y]) => (FlagConstant [subFlags64(x,y)]) diff --git a/src/cmd/compile/internal/ssa/rewriteARM64.go b/src/cmd/compile/internal/ssa/rewriteARM64.go index 0fb86b6bdd..6c48812121 100644 --- a/src/cmd/compile/internal/ssa/rewriteARM64.go +++ b/src/cmd/compile/internal/ssa/rewriteARM64.go @@ -1065,7 +1065,7 @@ func rewriteValueARM64_OpARM64ADCSflags(v *Value) bool { break } v_2_0 := v_2.Args[0] - if v_2_0.Op != OpARM64ADDSconstflags || v_2_0.AuxInt != -1 { + if v_2_0.Op != OpARM64ADDSconstflags || auxIntToInt64(v_2_0.AuxInt) != -1 { break } v_2_0_0 := v_2_0.Args[0] @@ -1086,11 +1086,11 @@ func rewriteValueARM64_OpARM64ADCSflags(v *Value) bool { break } v_2_0 := v_2.Args[0] - if v_2_0.Op != OpARM64ADDSconstflags || v_2_0.AuxInt != -1 { + if v_2_0.Op != OpARM64ADDSconstflags || auxIntToInt64(v_2_0.AuxInt) != -1 { break } v_2_0_0 := v_2_0.Args[0] - if v_2_0_0.Op != OpARM64MOVDconst || v_2_0_0.AuxInt != 0 { + if v_2_0_0.Op != OpARM64MOVDconst || auxIntToInt64(v_2_0_0.AuxInt) != 0 { break } v.reset(OpARM64ADDSflags) @@ -1112,9 +1112,9 @@ func rewriteValueARM64_OpARM64ADD(v *Value) bool { if v_1.Op != OpARM64MOVDconst { continue } - c := v_1.AuxInt + c := auxIntToInt64(v_1.AuxInt) v.reset(OpARM64ADDconst) - v.AuxInt = c + v.AuxInt = int64ToAuxInt(c) v.AddArg(x) return true } @@ -1593,7 +1593,7 @@ func rewriteValueARM64_OpARM64ADDconst(v *Value) bool { // match: (ADDconst [0] x) // result: x for { - if v.AuxInt != 0 { + if auxIntToInt64(v.AuxInt) != 0 { break } x := v_0 @@ -1603,40 +1603,40 @@ func rewriteValueARM64_OpARM64ADDconst(v *Value) bool { // match: (ADDconst [c] (MOVDconst [d])) // result: (MOVDconst [c+d]) for { - c := v.AuxInt + c := auxIntToInt64(v.AuxInt) if v_0.Op != OpARM64MOVDconst { break } - d := v_0.AuxInt + d := auxIntToInt64(v_0.AuxInt) v.reset(OpARM64MOVDconst) - v.AuxInt = c + d + v.AuxInt = int64ToAuxInt(c + d) return true } // match: (ADDconst [c] (ADDconst [d] x)) // result: (ADDconst [c+d] x) for { - c := v.AuxInt + c := auxIntToInt64(v.AuxInt) if v_0.Op != OpARM64ADDconst { break } - d := v_0.AuxInt + d := auxIntToInt64(v_0.AuxInt) x := v_0.Args[0] v.reset(OpARM64ADDconst) - v.AuxInt = c + d + v.AuxInt = int64ToAuxInt(c + d) v.AddArg(x) return true } // match: (ADDconst [c] (SUBconst [d] x)) // result: (ADDconst [c-d] x) for { - c := v.AuxInt + c := auxIntToInt64(v.AuxInt) if v_0.Op != OpARM64SUBconst { break } - d := v_0.AuxInt + d := auxIntToInt64(v_0.AuxInt) x := v_0.Args[0] v.reset(OpARM64ADDconst) - v.AuxInt = c - d + v.AuxInt = int64ToAuxInt(c - d) v.AddArg(x) return true } @@ -1882,9 +1882,9 @@ func rewriteValueARM64_OpARM64AND(v *Value) bool { if v_1.Op != OpARM64MOVDconst { continue } - c := v_1.AuxInt + c := auxIntToInt64(v_1.AuxInt) v.reset(OpARM64ANDconst) - v.AuxInt = c + v.AuxInt = int64ToAuxInt(c) v.AddArg(x) return true } @@ -1988,17 +1988,17 @@ func rewriteValueARM64_OpARM64ANDconst(v *Value) bool { // match: (ANDconst [0] _) // result: (MOVDconst [0]) for { - if v.AuxInt != 0 { + if auxIntToInt64(v.AuxInt) != 0 { break } v.reset(OpARM64MOVDconst) - v.AuxInt = 0 + 
v.AuxInt = int64ToAuxInt(0) return true } // match: (ANDconst [-1] x) // result: x for { - if v.AuxInt != -1 { + if auxIntToInt64(v.AuxInt) != -1 { break } x := v_0 @@ -2008,65 +2008,65 @@ func rewriteValueARM64_OpARM64ANDconst(v *Value) bool { // match: (ANDconst [c] (MOVDconst [d])) // result: (MOVDconst [c&d]) for { - c := v.AuxInt + c := auxIntToInt64(v.AuxInt) if v_0.Op != OpARM64MOVDconst { break } - d := v_0.AuxInt + d := auxIntToInt64(v_0.AuxInt) v.reset(OpARM64MOVDconst) - v.AuxInt = c & d + v.AuxInt = int64ToAuxInt(c & d) return true } // match: (ANDconst [c] (ANDconst [d] x)) // result: (ANDconst [c&d] x) for { - c := v.AuxInt + c := auxIntToInt64(v.AuxInt) if v_0.Op != OpARM64ANDconst { break } - d := v_0.AuxInt + d := auxIntToInt64(v_0.AuxInt) x := v_0.Args[0] v.reset(OpARM64ANDconst) - v.AuxInt = c & d + v.AuxInt = int64ToAuxInt(c & d) v.AddArg(x) return true } // match: (ANDconst [c] (MOVWUreg x)) // result: (ANDconst [c&(1<<32-1)] x) for { - c := v.AuxInt + c := auxIntToInt64(v.AuxInt) if v_0.Op != OpARM64MOVWUreg { break } x := v_0.Args[0] v.reset(OpARM64ANDconst) - v.AuxInt = c & (1<<32 - 1) + v.AuxInt = int64ToAuxInt(c & (1<<32 - 1)) v.AddArg(x) return true } // match: (ANDconst [c] (MOVHUreg x)) // result: (ANDconst [c&(1<<16-1)] x) for { - c := v.AuxInt + c := auxIntToInt64(v.AuxInt) if v_0.Op != OpARM64MOVHUreg { break } x := v_0.Args[0] v.reset(OpARM64ANDconst) - v.AuxInt = c & (1<<16 - 1) + v.AuxInt = int64ToAuxInt(c & (1<<16 - 1)) v.AddArg(x) return true } // match: (ANDconst [c] (MOVBUreg x)) // result: (ANDconst [c&(1<<8-1)] x) for { - c := v.AuxInt + c := auxIntToInt64(v.AuxInt) if v_0.Op != OpARM64MOVBUreg { break } x := v_0.Args[0] v.reset(OpARM64ANDconst) - v.AuxInt = c & (1<<8 - 1) + v.AuxInt = int64ToAuxInt(c & (1<<8 - 1)) v.AddArg(x) return true } @@ -2280,9 +2280,9 @@ func rewriteValueARM64_OpARM64BIC(v *Value) bool { if v_1.Op != OpARM64MOVDconst { break } - c := v_1.AuxInt + c := auxIntToInt64(v_1.AuxInt) v.reset(OpARM64ANDconst) - v.AuxInt = ^c + v.AuxInt = int64ToAuxInt(^c) v.AddArg(x) return true } @@ -2294,7 +2294,7 @@ func rewriteValueARM64_OpARM64BIC(v *Value) bool { break } v.reset(OpARM64MOVDconst) - v.AuxInt = 0 + v.AuxInt = int64ToAuxInt(0) return true } // match: (BIC x0 x1:(SLLconst [c] y)) @@ -2475,9 +2475,9 @@ func rewriteValueARM64_OpARM64CMN(v *Value) bool { if v_1.Op != OpARM64MOVDconst { continue } - c := v_1.AuxInt + c := auxIntToInt64(v_1.AuxInt) v.reset(OpARM64CMNconst) - v.AuxInt = c + v.AuxInt = int64ToAuxInt(c) v.AddArg(x) return true } @@ -2555,16 +2555,16 @@ func rewriteValueARM64_OpARM64CMNW(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] // match: (CMNW x (MOVDconst [c])) - // result: (CMNWconst [c] x) + // result: (CMNWconst [int32(c)] x) for { for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { x := v_0 if v_1.Op != OpARM64MOVDconst { continue } - c := v_1.AuxInt + c := auxIntToInt64(v_1.AuxInt) v.reset(OpARM64CMNWconst) - v.AuxInt = c + v.AuxInt = int32ToAuxInt(int32(c)) v.AddArg(x) return true } @@ -2726,9 +2726,9 @@ func rewriteValueARM64_OpARM64CMP(v *Value) bool { if v_1.Op != OpARM64MOVDconst { break } - c := v_1.AuxInt + c := auxIntToInt64(v_1.AuxInt) v.reset(OpARM64CMPconst) - v.AuxInt = c + v.AuxInt = int64ToAuxInt(c) v.AddArg(x) return true } @@ -2738,11 +2738,11 @@ func rewriteValueARM64_OpARM64CMP(v *Value) bool { if v_0.Op != OpARM64MOVDconst { break } - c := v_0.AuxInt + c := auxIntToInt64(v_0.AuxInt) x := v_1 v.reset(OpARM64InvertFlags) v0 := b.NewValue0(v.Pos, OpARM64CMPconst, types.TypeFlags) - 
v0.AuxInt = c + v0.AuxInt = int64ToAuxInt(c) v0.AddArg(x) v.AddArg(v0) return true @@ -3377,13 +3377,13 @@ func rewriteValueARM64_OpARM64DIV(v *Value) bool { if v_0.Op != OpARM64MOVDconst { break } - c := v_0.AuxInt + c := auxIntToInt64(v_0.AuxInt) if v_1.Op != OpARM64MOVDconst { break } - d := v_1.AuxInt + d := auxIntToInt64(v_1.AuxInt) v.reset(OpARM64MOVDconst) - v.AuxInt = c / d + v.AuxInt = int64ToAuxInt(c / d) return true } return false @@ -3397,13 +3397,13 @@ func rewriteValueARM64_OpARM64DIVW(v *Value) bool { if v_0.Op != OpARM64MOVDconst { break } - c := v_0.AuxInt + c := auxIntToInt64(v_0.AuxInt) if v_1.Op != OpARM64MOVDconst { break } - d := v_1.AuxInt + d := auxIntToInt64(v_1.AuxInt) v.reset(OpARM64MOVDconst) - v.AuxInt = int64(int32(c) / int32(d)) + v.AuxInt = int64ToAuxInt(int64(int32(c) / int32(d))) return true } return false @@ -3418,9 +3418,9 @@ func rewriteValueARM64_OpARM64EON(v *Value) bool { if v_1.Op != OpARM64MOVDconst { break } - c := v_1.AuxInt + c := auxIntToInt64(v_1.AuxInt) v.reset(OpARM64XORconst) - v.AuxInt = ^c + v.AuxInt = int64ToAuxInt(^c) v.AddArg(x) return true } @@ -3432,7 +3432,7 @@ func rewriteValueARM64_OpARM64EON(v *Value) bool { break } v.reset(OpARM64MOVDconst) - v.AuxInt = -1 + v.AuxInt = int64ToAuxInt(-1) return true } // match: (EON x0 x1:(SLLconst [c] y)) @@ -4858,7 +4858,7 @@ func rewriteValueARM64_OpARM64MADD(v *Value) bool { for { a := v_0 x := v_1 - if v_2.Op != OpARM64MOVDconst || v_2.AuxInt != -1 { + if v_2.Op != OpARM64MOVDconst || auxIntToInt64(v_2.AuxInt) != -1 { break } v.reset(OpARM64SUB) @@ -4869,7 +4869,7 @@ func rewriteValueARM64_OpARM64MADD(v *Value) bool { // result: a for { a := v_0 - if v_2.Op != OpARM64MOVDconst || v_2.AuxInt != 0 { + if v_2.Op != OpARM64MOVDconst || auxIntToInt64(v_2.AuxInt) != 0 { break } v.copyOf(a) @@ -4880,7 +4880,7 @@ func rewriteValueARM64_OpARM64MADD(v *Value) bool { for { a := v_0 x := v_1 - if v_2.Op != OpARM64MOVDconst || v_2.AuxInt != 1 { + if v_2.Op != OpARM64MOVDconst || auxIntToInt64(v_2.AuxInt) != 1 { break } v.reset(OpARM64ADD) @@ -4896,12 +4896,12 @@ func rewriteValueARM64_OpARM64MADD(v *Value) bool { if v_2.Op != OpARM64MOVDconst { break } - c := v_2.AuxInt + c := auxIntToInt64(v_2.AuxInt) if !(isPowerOfTwo(c)) { break } v.reset(OpARM64ADDshiftLL) - v.AuxInt = log2(c) + v.AuxInt = int64ToAuxInt(log2(c)) v.AddArg2(a, x) return true } @@ -4914,13 +4914,13 @@ func rewriteValueARM64_OpARM64MADD(v *Value) bool { if v_2.Op != OpARM64MOVDconst { break } - c := v_2.AuxInt + c := auxIntToInt64(v_2.AuxInt) if !(isPowerOfTwo(c-1) && c >= 3) { break } v.reset(OpARM64ADD) v0 := b.NewValue0(v.Pos, OpARM64ADDshiftLL, x.Type) - v0.AuxInt = log2(c - 1) + v0.AuxInt = int64ToAuxInt(log2(c - 1)) v0.AddArg2(x, x) v.AddArg2(a, v0) return true @@ -4934,13 +4934,13 @@ func rewriteValueARM64_OpARM64MADD(v *Value) bool { if v_2.Op != OpARM64MOVDconst { break } - c := v_2.AuxInt + c := auxIntToInt64(v_2.AuxInt) if !(isPowerOfTwo(c+1) && c >= 7) { break } v.reset(OpARM64SUB) v0 := b.NewValue0(v.Pos, OpARM64SUBshiftLL, x.Type) - v0.AuxInt = log2(c + 1) + v0.AuxInt = int64ToAuxInt(log2(c + 1)) v0.AddArg2(x, x) v.AddArg2(a, v0) return true @@ -4954,14 +4954,14 @@ func rewriteValueARM64_OpARM64MADD(v *Value) bool { if v_2.Op != OpARM64MOVDconst { break } - c := v_2.AuxInt + c := auxIntToInt64(v_2.AuxInt) if !(c%3 == 0 && isPowerOfTwo(c/3)) { break } v.reset(OpARM64SUBshiftLL) - v.AuxInt = log2(c / 3) + v.AuxInt = int64ToAuxInt(log2(c / 3)) v0 := b.NewValue0(v.Pos, OpARM64SUBshiftLL, x.Type) - v0.AuxInt = 2 + v0.AuxInt = 
int64ToAuxInt(2) v0.AddArg2(x, x) v.AddArg2(a, v0) return true @@ -4975,14 +4975,14 @@ func rewriteValueARM64_OpARM64MADD(v *Value) bool { if v_2.Op != OpARM64MOVDconst { break } - c := v_2.AuxInt + c := auxIntToInt64(v_2.AuxInt) if !(c%5 == 0 && isPowerOfTwo(c/5)) { break } v.reset(OpARM64ADDshiftLL) - v.AuxInt = log2(c / 5) + v.AuxInt = int64ToAuxInt(log2(c / 5)) v0 := b.NewValue0(v.Pos, OpARM64ADDshiftLL, x.Type) - v0.AuxInt = 2 + v0.AuxInt = int64ToAuxInt(2) v0.AddArg2(x, x) v.AddArg2(a, v0) return true @@ -4996,14 +4996,14 @@ func rewriteValueARM64_OpARM64MADD(v *Value) bool { if v_2.Op != OpARM64MOVDconst { break } - c := v_2.AuxInt + c := auxIntToInt64(v_2.AuxInt) if !(c%7 == 0 && isPowerOfTwo(c/7)) { break } v.reset(OpARM64SUBshiftLL) - v.AuxInt = log2(c / 7) + v.AuxInt = int64ToAuxInt(log2(c / 7)) v0 := b.NewValue0(v.Pos, OpARM64SUBshiftLL, x.Type) - v0.AuxInt = 3 + v0.AuxInt = int64ToAuxInt(3) v0.AddArg2(x, x) v.AddArg2(a, v0) return true @@ -5017,14 +5017,14 @@ func rewriteValueARM64_OpARM64MADD(v *Value) bool { if v_2.Op != OpARM64MOVDconst { break } - c := v_2.AuxInt + c := auxIntToInt64(v_2.AuxInt) if !(c%9 == 0 && isPowerOfTwo(c/9)) { break } v.reset(OpARM64ADDshiftLL) - v.AuxInt = log2(c / 9) + v.AuxInt = int64ToAuxInt(log2(c / 9)) v0 := b.NewValue0(v.Pos, OpARM64ADDshiftLL, x.Type) - v0.AuxInt = 3 + v0.AuxInt = int64ToAuxInt(3) v0.AddArg2(x, x) v.AddArg2(a, v0) return true @@ -5033,7 +5033,7 @@ func rewriteValueARM64_OpARM64MADD(v *Value) bool { // result: (SUB a x) for { a := v_0 - if v_1.Op != OpARM64MOVDconst || v_1.AuxInt != -1 { + if v_1.Op != OpARM64MOVDconst || auxIntToInt64(v_1.AuxInt) != -1 { break } x := v_2 @@ -5045,7 +5045,7 @@ func rewriteValueARM64_OpARM64MADD(v *Value) bool { // result: a for { a := v_0 - if v_1.Op != OpARM64MOVDconst || v_1.AuxInt != 0 { + if v_1.Op != OpARM64MOVDconst || auxIntToInt64(v_1.AuxInt) != 0 { break } v.copyOf(a) @@ -5055,7 +5055,7 @@ func rewriteValueARM64_OpARM64MADD(v *Value) bool { // result: (ADD a x) for { a := v_0 - if v_1.Op != OpARM64MOVDconst || v_1.AuxInt != 1 { + if v_1.Op != OpARM64MOVDconst || auxIntToInt64(v_1.AuxInt) != 1 { break } x := v_2 @@ -5071,13 +5071,13 @@ func rewriteValueARM64_OpARM64MADD(v *Value) bool { if v_1.Op != OpARM64MOVDconst { break } - c := v_1.AuxInt + c := auxIntToInt64(v_1.AuxInt) x := v_2 if !(isPowerOfTwo(c)) { break } v.reset(OpARM64ADDshiftLL) - v.AuxInt = log2(c) + v.AuxInt = int64ToAuxInt(log2(c)) v.AddArg2(a, x) return true } @@ -5089,14 +5089,14 @@ func rewriteValueARM64_OpARM64MADD(v *Value) bool { if v_1.Op != OpARM64MOVDconst { break } - c := v_1.AuxInt + c := auxIntToInt64(v_1.AuxInt) x := v_2 if !(isPowerOfTwo(c-1) && c >= 3) { break } v.reset(OpARM64ADD) v0 := b.NewValue0(v.Pos, OpARM64ADDshiftLL, x.Type) - v0.AuxInt = log2(c - 1) + v0.AuxInt = int64ToAuxInt(log2(c - 1)) v0.AddArg2(x, x) v.AddArg2(a, v0) return true @@ -5109,14 +5109,14 @@ func rewriteValueARM64_OpARM64MADD(v *Value) bool { if v_1.Op != OpARM64MOVDconst { break } - c := v_1.AuxInt + c := auxIntToInt64(v_1.AuxInt) x := v_2 if !(isPowerOfTwo(c+1) && c >= 7) { break } v.reset(OpARM64SUB) v0 := b.NewValue0(v.Pos, OpARM64SUBshiftLL, x.Type) - v0.AuxInt = log2(c + 1) + v0.AuxInt = int64ToAuxInt(log2(c + 1)) v0.AddArg2(x, x) v.AddArg2(a, v0) return true @@ -5129,15 +5129,15 @@ func rewriteValueARM64_OpARM64MADD(v *Value) bool { if v_1.Op != OpARM64MOVDconst { break } - c := v_1.AuxInt + c := auxIntToInt64(v_1.AuxInt) x := v_2 if !(c%3 == 0 && isPowerOfTwo(c/3)) { break } v.reset(OpARM64SUBshiftLL) - v.AuxInt = log2(c 
/ 3) + v.AuxInt = int64ToAuxInt(log2(c / 3)) v0 := b.NewValue0(v.Pos, OpARM64SUBshiftLL, x.Type) - v0.AuxInt = 2 + v0.AuxInt = int64ToAuxInt(2) v0.AddArg2(x, x) v.AddArg2(a, v0) return true @@ -5150,15 +5150,15 @@ func rewriteValueARM64_OpARM64MADD(v *Value) bool { if v_1.Op != OpARM64MOVDconst { break } - c := v_1.AuxInt + c := auxIntToInt64(v_1.AuxInt) x := v_2 if !(c%5 == 0 && isPowerOfTwo(c/5)) { break } v.reset(OpARM64ADDshiftLL) - v.AuxInt = log2(c / 5) + v.AuxInt = int64ToAuxInt(log2(c / 5)) v0 := b.NewValue0(v.Pos, OpARM64ADDshiftLL, x.Type) - v0.AuxInt = 2 + v0.AuxInt = int64ToAuxInt(2) v0.AddArg2(x, x) v.AddArg2(a, v0) return true @@ -5171,15 +5171,15 @@ func rewriteValueARM64_OpARM64MADD(v *Value) bool { if v_1.Op != OpARM64MOVDconst { break } - c := v_1.AuxInt + c := auxIntToInt64(v_1.AuxInt) x := v_2 if !(c%7 == 0 && isPowerOfTwo(c/7)) { break } v.reset(OpARM64SUBshiftLL) - v.AuxInt = log2(c / 7) + v.AuxInt = int64ToAuxInt(log2(c / 7)) v0 := b.NewValue0(v.Pos, OpARM64SUBshiftLL, x.Type) - v0.AuxInt = 3 + v0.AuxInt = int64ToAuxInt(3) v0.AddArg2(x, x) v.AddArg2(a, v0) return true @@ -5192,15 +5192,15 @@ func rewriteValueARM64_OpARM64MADD(v *Value) bool { if v_1.Op != OpARM64MOVDconst { break } - c := v_1.AuxInt + c := auxIntToInt64(v_1.AuxInt) x := v_2 if !(c%9 == 0 && isPowerOfTwo(c/9)) { break } v.reset(OpARM64ADDshiftLL) - v.AuxInt = log2(c / 9) + v.AuxInt = int64ToAuxInt(log2(c / 9)) v0 := b.NewValue0(v.Pos, OpARM64ADDshiftLL, x.Type) - v0.AuxInt = 3 + v0.AuxInt = int64ToAuxInt(3) v0.AddArg2(x, x) v.AddArg2(a, v0) return true @@ -5211,11 +5211,11 @@ func rewriteValueARM64_OpARM64MADD(v *Value) bool { if v_0.Op != OpARM64MOVDconst { break } - c := v_0.AuxInt + c := auxIntToInt64(v_0.AuxInt) x := v_1 y := v_2 v.reset(OpARM64ADDconst) - v.AuxInt = c + v.AuxInt = int64ToAuxInt(c) v0 := b.NewValue0(v.Pos, OpARM64MUL, x.Type) v0.AddArg2(x, y) v.AddArg(v0) @@ -5228,13 +5228,13 @@ func rewriteValueARM64_OpARM64MADD(v *Value) bool { if v_1.Op != OpARM64MOVDconst { break } - c := v_1.AuxInt + c := auxIntToInt64(v_1.AuxInt) if v_2.Op != OpARM64MOVDconst { break } - d := v_2.AuxInt + d := auxIntToInt64(v_2.AuxInt) v.reset(OpARM64ADDconst) - v.AuxInt = c * d + v.AuxInt = int64ToAuxInt(c * d) v.AddArg(a) return true } @@ -5254,7 +5254,7 @@ func rewriteValueARM64_OpARM64MADDW(v *Value) bool { if v_2.Op != OpARM64MOVDconst { break } - c := v_2.AuxInt + c := auxIntToInt64(v_2.AuxInt) if !(int32(c) == -1) { break } @@ -5270,7 +5270,7 @@ func rewriteValueARM64_OpARM64MADDW(v *Value) bool { if v_2.Op != OpARM64MOVDconst { break } - c := v_2.AuxInt + c := auxIntToInt64(v_2.AuxInt) if !(int32(c) == 0) { break } @@ -5286,7 +5286,7 @@ func rewriteValueARM64_OpARM64MADDW(v *Value) bool { if v_2.Op != OpARM64MOVDconst { break } - c := v_2.AuxInt + c := auxIntToInt64(v_2.AuxInt) if !(int32(c) == 1) { break } @@ -5303,12 +5303,12 @@ func rewriteValueARM64_OpARM64MADDW(v *Value) bool { if v_2.Op != OpARM64MOVDconst { break } - c := v_2.AuxInt + c := auxIntToInt64(v_2.AuxInt) if !(isPowerOfTwo(c)) { break } v.reset(OpARM64ADDshiftLL) - v.AuxInt = log2(c) + v.AuxInt = int64ToAuxInt(log2(c)) v.AddArg2(a, x) return true } @@ -5321,13 +5321,13 @@ func rewriteValueARM64_OpARM64MADDW(v *Value) bool { if v_2.Op != OpARM64MOVDconst { break } - c := v_2.AuxInt + c := auxIntToInt64(v_2.AuxInt) if !(isPowerOfTwo(c-1) && int32(c) >= 3) { break } v.reset(OpARM64ADD) v0 := b.NewValue0(v.Pos, OpARM64ADDshiftLL, x.Type) - v0.AuxInt = log2(c - 1) + v0.AuxInt = int64ToAuxInt(log2(c - 1)) v0.AddArg2(x, x) v.AddArg2(a, v0) 
return true @@ -5341,13 +5341,13 @@ func rewriteValueARM64_OpARM64MADDW(v *Value) bool { if v_2.Op != OpARM64MOVDconst { break } - c := v_2.AuxInt + c := auxIntToInt64(v_2.AuxInt) if !(isPowerOfTwo(c+1) && int32(c) >= 7) { break } v.reset(OpARM64SUB) v0 := b.NewValue0(v.Pos, OpARM64SUBshiftLL, x.Type) - v0.AuxInt = log2(c + 1) + v0.AuxInt = int64ToAuxInt(log2(c + 1)) v0.AddArg2(x, x) v.AddArg2(a, v0) return true @@ -5361,14 +5361,14 @@ func rewriteValueARM64_OpARM64MADDW(v *Value) bool { if v_2.Op != OpARM64MOVDconst { break } - c := v_2.AuxInt + c := auxIntToInt64(v_2.AuxInt) if !(c%3 == 0 && isPowerOfTwo(c/3) && is32Bit(c)) { break } v.reset(OpARM64SUBshiftLL) - v.AuxInt = log2(c / 3) + v.AuxInt = int64ToAuxInt(log2(c / 3)) v0 := b.NewValue0(v.Pos, OpARM64SUBshiftLL, x.Type) - v0.AuxInt = 2 + v0.AuxInt = int64ToAuxInt(2) v0.AddArg2(x, x) v.AddArg2(a, v0) return true @@ -5382,14 +5382,14 @@ func rewriteValueARM64_OpARM64MADDW(v *Value) bool { if v_2.Op != OpARM64MOVDconst { break } - c := v_2.AuxInt + c := auxIntToInt64(v_2.AuxInt) if !(c%5 == 0 && isPowerOfTwo(c/5) && is32Bit(c)) { break } v.reset(OpARM64ADDshiftLL) - v.AuxInt = log2(c / 5) + v.AuxInt = int64ToAuxInt(log2(c / 5)) v0 := b.NewValue0(v.Pos, OpARM64ADDshiftLL, x.Type) - v0.AuxInt = 2 + v0.AuxInt = int64ToAuxInt(2) v0.AddArg2(x, x) v.AddArg2(a, v0) return true @@ -5403,14 +5403,14 @@ func rewriteValueARM64_OpARM64MADDW(v *Value) bool { if v_2.Op != OpARM64MOVDconst { break } - c := v_2.AuxInt + c := auxIntToInt64(v_2.AuxInt) if !(c%7 == 0 && isPowerOfTwo(c/7) && is32Bit(c)) { break } v.reset(OpARM64SUBshiftLL) - v.AuxInt = log2(c / 7) + v.AuxInt = int64ToAuxInt(log2(c / 7)) v0 := b.NewValue0(v.Pos, OpARM64SUBshiftLL, x.Type) - v0.AuxInt = 3 + v0.AuxInt = int64ToAuxInt(3) v0.AddArg2(x, x) v.AddArg2(a, v0) return true @@ -5424,14 +5424,14 @@ func rewriteValueARM64_OpARM64MADDW(v *Value) bool { if v_2.Op != OpARM64MOVDconst { break } - c := v_2.AuxInt + c := auxIntToInt64(v_2.AuxInt) if !(c%9 == 0 && isPowerOfTwo(c/9) && is32Bit(c)) { break } v.reset(OpARM64ADDshiftLL) - v.AuxInt = log2(c / 9) + v.AuxInt = int64ToAuxInt(log2(c / 9)) v0 := b.NewValue0(v.Pos, OpARM64ADDshiftLL, x.Type) - v0.AuxInt = 3 + v0.AuxInt = int64ToAuxInt(3) v0.AddArg2(x, x) v.AddArg2(a, v0) return true @@ -5444,7 +5444,7 @@ func rewriteValueARM64_OpARM64MADDW(v *Value) bool { if v_1.Op != OpARM64MOVDconst { break } - c := v_1.AuxInt + c := auxIntToInt64(v_1.AuxInt) x := v_2 if !(int32(c) == -1) { break @@ -5461,7 +5461,7 @@ func rewriteValueARM64_OpARM64MADDW(v *Value) bool { if v_1.Op != OpARM64MOVDconst { break } - c := v_1.AuxInt + c := auxIntToInt64(v_1.AuxInt) if !(int32(c) == 0) { break } @@ -5476,7 +5476,7 @@ func rewriteValueARM64_OpARM64MADDW(v *Value) bool { if v_1.Op != OpARM64MOVDconst { break } - c := v_1.AuxInt + c := auxIntToInt64(v_1.AuxInt) x := v_2 if !(int32(c) == 1) { break @@ -5493,13 +5493,13 @@ func rewriteValueARM64_OpARM64MADDW(v *Value) bool { if v_1.Op != OpARM64MOVDconst { break } - c := v_1.AuxInt + c := auxIntToInt64(v_1.AuxInt) x := v_2 if !(isPowerOfTwo(c)) { break } v.reset(OpARM64ADDshiftLL) - v.AuxInt = log2(c) + v.AuxInt = int64ToAuxInt(log2(c)) v.AddArg2(a, x) return true } @@ -5511,14 +5511,14 @@ func rewriteValueARM64_OpARM64MADDW(v *Value) bool { if v_1.Op != OpARM64MOVDconst { break } - c := v_1.AuxInt + c := auxIntToInt64(v_1.AuxInt) x := v_2 if !(isPowerOfTwo(c-1) && int32(c) >= 3) { break } v.reset(OpARM64ADD) v0 := b.NewValue0(v.Pos, OpARM64ADDshiftLL, x.Type) - v0.AuxInt = log2(c - 1) + v0.AuxInt = 
int64ToAuxInt(log2(c - 1)) v0.AddArg2(x, x) v.AddArg2(a, v0) return true @@ -5531,14 +5531,14 @@ func rewriteValueARM64_OpARM64MADDW(v *Value) bool { if v_1.Op != OpARM64MOVDconst { break } - c := v_1.AuxInt + c := auxIntToInt64(v_1.AuxInt) x := v_2 if !(isPowerOfTwo(c+1) && int32(c) >= 7) { break } v.reset(OpARM64SUB) v0 := b.NewValue0(v.Pos, OpARM64SUBshiftLL, x.Type) - v0.AuxInt = log2(c + 1) + v0.AuxInt = int64ToAuxInt(log2(c + 1)) v0.AddArg2(x, x) v.AddArg2(a, v0) return true @@ -5551,15 +5551,15 @@ func rewriteValueARM64_OpARM64MADDW(v *Value) bool { if v_1.Op != OpARM64MOVDconst { break } - c := v_1.AuxInt + c := auxIntToInt64(v_1.AuxInt) x := v_2 if !(c%3 == 0 && isPowerOfTwo(c/3) && is32Bit(c)) { break } v.reset(OpARM64SUBshiftLL) - v.AuxInt = log2(c / 3) + v.AuxInt = int64ToAuxInt(log2(c / 3)) v0 := b.NewValue0(v.Pos, OpARM64SUBshiftLL, x.Type) - v0.AuxInt = 2 + v0.AuxInt = int64ToAuxInt(2) v0.AddArg2(x, x) v.AddArg2(a, v0) return true @@ -5572,15 +5572,15 @@ func rewriteValueARM64_OpARM64MADDW(v *Value) bool { if v_1.Op != OpARM64MOVDconst { break } - c := v_1.AuxInt + c := auxIntToInt64(v_1.AuxInt) x := v_2 if !(c%5 == 0 && isPowerOfTwo(c/5) && is32Bit(c)) { break } v.reset(OpARM64ADDshiftLL) - v.AuxInt = log2(c / 5) + v.AuxInt = int64ToAuxInt(log2(c / 5)) v0 := b.NewValue0(v.Pos, OpARM64ADDshiftLL, x.Type) - v0.AuxInt = 2 + v0.AuxInt = int64ToAuxInt(2) v0.AddArg2(x, x) v.AddArg2(a, v0) return true @@ -5593,15 +5593,15 @@ func rewriteValueARM64_OpARM64MADDW(v *Value) bool { if v_1.Op != OpARM64MOVDconst { break } - c := v_1.AuxInt + c := auxIntToInt64(v_1.AuxInt) x := v_2 if !(c%7 == 0 && isPowerOfTwo(c/7) && is32Bit(c)) { break } v.reset(OpARM64SUBshiftLL) - v.AuxInt = log2(c / 7) + v.AuxInt = int64ToAuxInt(log2(c / 7)) v0 := b.NewValue0(v.Pos, OpARM64SUBshiftLL, x.Type) - v0.AuxInt = 3 + v0.AuxInt = int64ToAuxInt(3) v0.AddArg2(x, x) v.AddArg2(a, v0) return true @@ -5614,15 +5614,15 @@ func rewriteValueARM64_OpARM64MADDW(v *Value) bool { if v_1.Op != OpARM64MOVDconst { break } - c := v_1.AuxInt + c := auxIntToInt64(v_1.AuxInt) x := v_2 if !(c%9 == 0 && isPowerOfTwo(c/9) && is32Bit(c)) { break } v.reset(OpARM64ADDshiftLL) - v.AuxInt = log2(c / 9) + v.AuxInt = int64ToAuxInt(log2(c / 9)) v0 := b.NewValue0(v.Pos, OpARM64ADDshiftLL, x.Type) - v0.AuxInt = 3 + v0.AuxInt = int64ToAuxInt(3) v0.AddArg2(x, x) v.AddArg2(a, v0) return true @@ -5633,11 +5633,11 @@ func rewriteValueARM64_OpARM64MADDW(v *Value) bool { if v_0.Op != OpARM64MOVDconst { break } - c := v_0.AuxInt + c := auxIntToInt64(v_0.AuxInt) x := v_1 y := v_2 v.reset(OpARM64ADDconst) - v.AuxInt = c + v.AuxInt = int64ToAuxInt(c) v0 := b.NewValue0(v.Pos, OpARM64MULW, x.Type) v0.AddArg2(x, y) v.AddArg(v0) @@ -5650,13 +5650,13 @@ func rewriteValueARM64_OpARM64MADDW(v *Value) bool { if v_1.Op != OpARM64MOVDconst { break } - c := v_1.AuxInt + c := auxIntToInt64(v_1.AuxInt) if v_2.Op != OpARM64MOVDconst { break } - d := v_2.AuxInt + d := auxIntToInt64(v_2.AuxInt) v.reset(OpARM64ADDconst) - v.AuxInt = int64(int32(c) * int32(d)) + v.AuxInt = int64ToAuxInt(int64(int32(c) * int32(d))) v.AddArg(a) return true } @@ -5671,7 +5671,7 @@ func rewriteValueARM64_OpARM64MNEG(v *Value) bool { for { for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { x := v_0 - if v_1.Op != OpARM64MOVDconst || v_1.AuxInt != -1 { + if v_1.Op != OpARM64MOVDconst || auxIntToInt64(v_1.AuxInt) != -1 { continue } v.copyOf(x) @@ -5683,11 +5683,11 @@ func rewriteValueARM64_OpARM64MNEG(v *Value) bool { // result: (MOVDconst [0]) for { for _i0 := 0; _i0 <= 1; _i0, v_0, 
v_1 = _i0+1, v_1, v_0 { - if v_1.Op != OpARM64MOVDconst || v_1.AuxInt != 0 { + if v_1.Op != OpARM64MOVDconst || auxIntToInt64(v_1.AuxInt) != 0 { continue } v.reset(OpARM64MOVDconst) - v.AuxInt = 0 + v.AuxInt = int64ToAuxInt(0) return true } break @@ -5697,7 +5697,7 @@ func rewriteValueARM64_OpARM64MNEG(v *Value) bool { for { for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { x := v_0 - if v_1.Op != OpARM64MOVDconst || v_1.AuxInt != 1 { + if v_1.Op != OpARM64MOVDconst || auxIntToInt64(v_1.AuxInt) != 1 { continue } v.reset(OpARM64NEG) @@ -5715,13 +5715,13 @@ func rewriteValueARM64_OpARM64MNEG(v *Value) bool { if v_1.Op != OpARM64MOVDconst { continue } - c := v_1.AuxInt + c := auxIntToInt64(v_1.AuxInt) if !(isPowerOfTwo(c)) { continue } v.reset(OpARM64NEG) v0 := b.NewValue0(v.Pos, OpARM64SLLconst, x.Type) - v0.AuxInt = log2(c) + v0.AuxInt = int64ToAuxInt(log2(c)) v0.AddArg(x) v.AddArg(v0) return true @@ -5737,13 +5737,13 @@ func rewriteValueARM64_OpARM64MNEG(v *Value) bool { if v_1.Op != OpARM64MOVDconst { continue } - c := v_1.AuxInt + c := auxIntToInt64(v_1.AuxInt) if !(isPowerOfTwo(c-1) && c >= 3) { continue } v.reset(OpARM64NEG) v0 := b.NewValue0(v.Pos, OpARM64ADDshiftLL, x.Type) - v0.AuxInt = log2(c - 1) + v0.AuxInt = int64ToAuxInt(log2(c - 1)) v0.AddArg2(x, x) v.AddArg(v0) return true @@ -5759,13 +5759,13 @@ func rewriteValueARM64_OpARM64MNEG(v *Value) bool { if v_1.Op != OpARM64MOVDconst { continue } - c := v_1.AuxInt + c := auxIntToInt64(v_1.AuxInt) if !(isPowerOfTwo(c+1) && c >= 7) { continue } v.reset(OpARM64NEG) v0 := b.NewValue0(v.Pos, OpARM64ADDshiftLL, x.Type) - v0.AuxInt = log2(c + 1) + v0.AuxInt = int64ToAuxInt(log2(c + 1)) v1 := b.NewValue0(v.Pos, OpARM64NEG, x.Type) v1.AddArg(x) v0.AddArg2(v1, x) @@ -5783,15 +5783,15 @@ func rewriteValueARM64_OpARM64MNEG(v *Value) bool { if v_1.Op != OpARM64MOVDconst { continue } - c := v_1.AuxInt + c := auxIntToInt64(v_1.AuxInt) if !(c%3 == 0 && isPowerOfTwo(c/3)) { continue } v.reset(OpARM64SLLconst) v.Type = x.Type - v.AuxInt = log2(c / 3) + v.AuxInt = int64ToAuxInt(log2(c / 3)) v0 := b.NewValue0(v.Pos, OpARM64SUBshiftLL, x.Type) - v0.AuxInt = 2 + v0.AuxInt = int64ToAuxInt(2) v0.AddArg2(x, x) v.AddArg(v0) return true @@ -5807,15 +5807,15 @@ func rewriteValueARM64_OpARM64MNEG(v *Value) bool { if v_1.Op != OpARM64MOVDconst { continue } - c := v_1.AuxInt + c := auxIntToInt64(v_1.AuxInt) if !(c%5 == 0 && isPowerOfTwo(c/5)) { continue } v.reset(OpARM64NEG) v0 := b.NewValue0(v.Pos, OpARM64SLLconst, x.Type) - v0.AuxInt = log2(c / 5) + v0.AuxInt = int64ToAuxInt(log2(c / 5)) v1 := b.NewValue0(v.Pos, OpARM64ADDshiftLL, x.Type) - v1.AuxInt = 2 + v1.AuxInt = int64ToAuxInt(2) v1.AddArg2(x, x) v0.AddArg(v1) v.AddArg(v0) @@ -5832,15 +5832,15 @@ func rewriteValueARM64_OpARM64MNEG(v *Value) bool { if v_1.Op != OpARM64MOVDconst { continue } - c := v_1.AuxInt + c := auxIntToInt64(v_1.AuxInt) if !(c%7 == 0 && isPowerOfTwo(c/7)) { continue } v.reset(OpARM64SLLconst) v.Type = x.Type - v.AuxInt = log2(c / 7) + v.AuxInt = int64ToAuxInt(log2(c / 7)) v0 := b.NewValue0(v.Pos, OpARM64SUBshiftLL, x.Type) - v0.AuxInt = 3 + v0.AuxInt = int64ToAuxInt(3) v0.AddArg2(x, x) v.AddArg(v0) return true @@ -5856,15 +5856,15 @@ func rewriteValueARM64_OpARM64MNEG(v *Value) bool { if v_1.Op != OpARM64MOVDconst { continue } - c := v_1.AuxInt + c := auxIntToInt64(v_1.AuxInt) if !(c%9 == 0 && isPowerOfTwo(c/9)) { continue } v.reset(OpARM64NEG) v0 := b.NewValue0(v.Pos, OpARM64SLLconst, x.Type) - v0.AuxInt = log2(c / 9) + v0.AuxInt = int64ToAuxInt(log2(c / 9)) v1 := 
b.NewValue0(v.Pos, OpARM64ADDshiftLL, x.Type) - v1.AuxInt = 3 + v1.AuxInt = int64ToAuxInt(3) v1.AddArg2(x, x) v0.AddArg(v1) v.AddArg(v0) @@ -5879,13 +5879,13 @@ func rewriteValueARM64_OpARM64MNEG(v *Value) bool { if v_0.Op != OpARM64MOVDconst { continue } - c := v_0.AuxInt + c := auxIntToInt64(v_0.AuxInt) if v_1.Op != OpARM64MOVDconst { continue } - d := v_1.AuxInt + d := auxIntToInt64(v_1.AuxInt) v.reset(OpARM64MOVDconst) - v.AuxInt = -c * d + v.AuxInt = int64ToAuxInt(-c * d) return true } break @@ -5905,7 +5905,7 @@ func rewriteValueARM64_OpARM64MNEGW(v *Value) bool { if v_1.Op != OpARM64MOVDconst { continue } - c := v_1.AuxInt + c := auxIntToInt64(v_1.AuxInt) if !(int32(c) == -1) { continue } @@ -5922,12 +5922,12 @@ func rewriteValueARM64_OpARM64MNEGW(v *Value) bool { if v_1.Op != OpARM64MOVDconst { continue } - c := v_1.AuxInt + c := auxIntToInt64(v_1.AuxInt) if !(int32(c) == 0) { continue } v.reset(OpARM64MOVDconst) - v.AuxInt = 0 + v.AuxInt = int64ToAuxInt(0) return true } break @@ -5941,7 +5941,7 @@ func rewriteValueARM64_OpARM64MNEGW(v *Value) bool { if v_1.Op != OpARM64MOVDconst { continue } - c := v_1.AuxInt + c := auxIntToInt64(v_1.AuxInt) if !(int32(c) == 1) { continue } @@ -5960,13 +5960,13 @@ func rewriteValueARM64_OpARM64MNEGW(v *Value) bool { if v_1.Op != OpARM64MOVDconst { continue } - c := v_1.AuxInt + c := auxIntToInt64(v_1.AuxInt) if !(isPowerOfTwo(c)) { continue } v.reset(OpARM64NEG) v0 := b.NewValue0(v.Pos, OpARM64SLLconst, x.Type) - v0.AuxInt = log2(c) + v0.AuxInt = int64ToAuxInt(log2(c)) v0.AddArg(x) v.AddArg(v0) return true @@ -5982,13 +5982,13 @@ func rewriteValueARM64_OpARM64MNEGW(v *Value) bool { if v_1.Op != OpARM64MOVDconst { continue } - c := v_1.AuxInt + c := auxIntToInt64(v_1.AuxInt) if !(isPowerOfTwo(c-1) && int32(c) >= 3) { continue } v.reset(OpARM64NEG) v0 := b.NewValue0(v.Pos, OpARM64ADDshiftLL, x.Type) - v0.AuxInt = log2(c - 1) + v0.AuxInt = int64ToAuxInt(log2(c - 1)) v0.AddArg2(x, x) v.AddArg(v0) return true @@ -6004,13 +6004,13 @@ func rewriteValueARM64_OpARM64MNEGW(v *Value) bool { if v_1.Op != OpARM64MOVDconst { continue } - c := v_1.AuxInt + c := auxIntToInt64(v_1.AuxInt) if !(isPowerOfTwo(c+1) && int32(c) >= 7) { continue } v.reset(OpARM64NEG) v0 := b.NewValue0(v.Pos, OpARM64ADDshiftLL, x.Type) - v0.AuxInt = log2(c + 1) + v0.AuxInt = int64ToAuxInt(log2(c + 1)) v1 := b.NewValue0(v.Pos, OpARM64NEG, x.Type) v1.AddArg(x) v0.AddArg2(v1, x) @@ -6028,15 +6028,15 @@ func rewriteValueARM64_OpARM64MNEGW(v *Value) bool { if v_1.Op != OpARM64MOVDconst { continue } - c := v_1.AuxInt + c := auxIntToInt64(v_1.AuxInt) if !(c%3 == 0 && isPowerOfTwo(c/3) && is32Bit(c)) { continue } v.reset(OpARM64SLLconst) v.Type = x.Type - v.AuxInt = log2(c / 3) + v.AuxInt = int64ToAuxInt(log2(c / 3)) v0 := b.NewValue0(v.Pos, OpARM64SUBshiftLL, x.Type) - v0.AuxInt = 2 + v0.AuxInt = int64ToAuxInt(2) v0.AddArg2(x, x) v.AddArg(v0) return true @@ -6052,15 +6052,15 @@ func rewriteValueARM64_OpARM64MNEGW(v *Value) bool { if v_1.Op != OpARM64MOVDconst { continue } - c := v_1.AuxInt + c := auxIntToInt64(v_1.AuxInt) if !(c%5 == 0 && isPowerOfTwo(c/5) && is32Bit(c)) { continue } v.reset(OpARM64NEG) v0 := b.NewValue0(v.Pos, OpARM64SLLconst, x.Type) - v0.AuxInt = log2(c / 5) + v0.AuxInt = int64ToAuxInt(log2(c / 5)) v1 := b.NewValue0(v.Pos, OpARM64ADDshiftLL, x.Type) - v1.AuxInt = 2 + v1.AuxInt = int64ToAuxInt(2) v1.AddArg2(x, x) v0.AddArg(v1) v.AddArg(v0) @@ -6077,15 +6077,15 @@ func rewriteValueARM64_OpARM64MNEGW(v *Value) bool { if v_1.Op != OpARM64MOVDconst { continue } - c := v_1.AuxInt 
+ c := auxIntToInt64(v_1.AuxInt) if !(c%7 == 0 && isPowerOfTwo(c/7) && is32Bit(c)) { continue } v.reset(OpARM64SLLconst) v.Type = x.Type - v.AuxInt = log2(c / 7) + v.AuxInt = int64ToAuxInt(log2(c / 7)) v0 := b.NewValue0(v.Pos, OpARM64SUBshiftLL, x.Type) - v0.AuxInt = 3 + v0.AuxInt = int64ToAuxInt(3) v0.AddArg2(x, x) v.AddArg(v0) return true @@ -6101,15 +6101,15 @@ func rewriteValueARM64_OpARM64MNEGW(v *Value) bool { if v_1.Op != OpARM64MOVDconst { continue } - c := v_1.AuxInt + c := auxIntToInt64(v_1.AuxInt) if !(c%9 == 0 && isPowerOfTwo(c/9) && is32Bit(c)) { continue } v.reset(OpARM64NEG) v0 := b.NewValue0(v.Pos, OpARM64SLLconst, x.Type) - v0.AuxInt = log2(c / 9) + v0.AuxInt = int64ToAuxInt(log2(c / 9)) v1 := b.NewValue0(v.Pos, OpARM64ADDshiftLL, x.Type) - v1.AuxInt = 3 + v1.AuxInt = int64ToAuxInt(3) v1.AddArg2(x, x) v0.AddArg(v1) v.AddArg(v0) @@ -6124,13 +6124,13 @@ func rewriteValueARM64_OpARM64MNEGW(v *Value) bool { if v_0.Op != OpARM64MOVDconst { continue } - c := v_0.AuxInt + c := auxIntToInt64(v_0.AuxInt) if v_1.Op != OpARM64MOVDconst { continue } - d := v_1.AuxInt + d := auxIntToInt64(v_1.AuxInt) v.reset(OpARM64MOVDconst) - v.AuxInt = -int64(int32(c) * int32(d)) + v.AuxInt = int64ToAuxInt(-int64(int32(c) * int32(d))) return true } break @@ -6146,13 +6146,13 @@ func rewriteValueARM64_OpARM64MOD(v *Value) bool { if v_0.Op != OpARM64MOVDconst { break } - c := v_0.AuxInt + c := auxIntToInt64(v_0.AuxInt) if v_1.Op != OpARM64MOVDconst { break } - d := v_1.AuxInt + d := auxIntToInt64(v_1.AuxInt) v.reset(OpARM64MOVDconst) - v.AuxInt = c % d + v.AuxInt = int64ToAuxInt(c % d) return true } return false @@ -6166,13 +6166,13 @@ func rewriteValueARM64_OpARM64MODW(v *Value) bool { if v_0.Op != OpARM64MOVDconst { break } - c := v_0.AuxInt + c := auxIntToInt64(v_0.AuxInt) if v_1.Op != OpARM64MOVDconst { break } - d := v_1.AuxInt + d := auxIntToInt64(v_1.AuxInt) v.reset(OpARM64MOVDconst) - v.AuxInt = int64(int32(c) % int32(d)) + v.AuxInt = int64ToAuxInt(int64(int32(c) % int32(d))) return true } return false @@ -6380,10 +6380,10 @@ func rewriteValueARM64_OpARM64MOVBUreg(v *Value) bool { if v_0.Op != OpARM64ANDconst { break } - c := v_0.AuxInt + c := auxIntToInt64(v_0.AuxInt) x := v_0.Args[0] v.reset(OpARM64ANDconst) - v.AuxInt = c & (1<<8 - 1) + v.AuxInt = int64ToAuxInt(c & (1<<8 - 1)) v.AddArg(x) return true } @@ -6393,9 +6393,9 @@ func rewriteValueARM64_OpARM64MOVBUreg(v *Value) bool { if v_0.Op != OpARM64MOVDconst { break } - c := v_0.AuxInt + c := auxIntToInt64(v_0.AuxInt) v.reset(OpARM64MOVDconst) - v.AuxInt = int64(uint8(c)) + v.AuxInt = int64ToAuxInt(int64(uint8(c))) return true } // match: (MOVBUreg x) @@ -6636,9 +6636,9 @@ func rewriteValueARM64_OpARM64MOVBreg(v *Value) bool { if v_0.Op != OpARM64MOVDconst { break } - c := v_0.AuxInt + c := auxIntToInt64(v_0.AuxInt) v.reset(OpARM64MOVDconst) - v.AuxInt = int64(int8(c)) + v.AuxInt = int64ToAuxInt(int64(int8(c))) return true } // match: (MOVBreg (SLLconst [lc] x)) @@ -8991,9 +8991,9 @@ func rewriteValueARM64_OpARM64MOVDreg(v *Value) bool { if v_0.Op != OpARM64MOVDconst { break } - c := v_0.AuxInt + c := auxIntToInt64(v_0.AuxInt) v.reset(OpARM64MOVDconst) - v.AuxInt = c + v.AuxInt = int64ToAuxInt(c) return true } return false @@ -9874,10 +9874,10 @@ func rewriteValueARM64_OpARM64MOVHUreg(v *Value) bool { if v_0.Op != OpARM64ANDconst { break } - c := v_0.AuxInt + c := auxIntToInt64(v_0.AuxInt) x := v_0.Args[0] v.reset(OpARM64ANDconst) - v.AuxInt = c & (1<<16 - 1) + v.AuxInt = int64ToAuxInt(c & (1<<16 - 1)) v.AddArg(x) return true } @@ -9887,9 
+9887,9 @@ func rewriteValueARM64_OpARM64MOVHUreg(v *Value) bool { if v_0.Op != OpARM64MOVDconst { break } - c := v_0.AuxInt + c := auxIntToInt64(v_0.AuxInt) v.reset(OpARM64MOVDconst) - v.AuxInt = int64(uint16(c)) + v.AuxInt = int64ToAuxInt(int64(uint16(c))) return true } // match: (MOVHUreg (SLLconst [sc] x)) @@ -10301,9 +10301,9 @@ func rewriteValueARM64_OpARM64MOVHreg(v *Value) bool { if v_0.Op != OpARM64MOVDconst { break } - c := v_0.AuxInt + c := auxIntToInt64(v_0.AuxInt) v.reset(OpARM64MOVDconst) - v.AuxInt = int64(int16(c)) + v.AuxInt = int64ToAuxInt(int64(int16(c))) return true } // match: (MOVHreg (SLLconst [lc] x)) @@ -11971,10 +11971,10 @@ func rewriteValueARM64_OpARM64MOVWUreg(v *Value) bool { if v_0.Op != OpARM64ANDconst { break } - c := v_0.AuxInt + c := auxIntToInt64(v_0.AuxInt) x := v_0.Args[0] v.reset(OpARM64ANDconst) - v.AuxInt = c & (1<<32 - 1) + v.AuxInt = int64ToAuxInt(c & (1<<32 - 1)) v.AddArg(x) return true } @@ -11984,9 +11984,9 @@ func rewriteValueARM64_OpARM64MOVWUreg(v *Value) bool { if v_0.Op != OpARM64MOVDconst { break } - c := v_0.AuxInt + c := auxIntToInt64(v_0.AuxInt) v.reset(OpARM64MOVDconst) - v.AuxInt = int64(uint32(c)) + v.AuxInt = int64ToAuxInt(int64(uint32(c))) return true } // match: (MOVWUreg (SLLconst [sc] x)) @@ -12456,9 +12456,9 @@ func rewriteValueARM64_OpARM64MOVWreg(v *Value) bool { if v_0.Op != OpARM64MOVDconst { break } - c := v_0.AuxInt + c := auxIntToInt64(v_0.AuxInt) v.reset(OpARM64MOVDconst) - v.AuxInt = int64(int32(c)) + v.AuxInt = int64ToAuxInt(int64(int32(c))) return true } // match: (MOVWreg (SLLconst [lc] x)) @@ -13346,7 +13346,7 @@ func rewriteValueARM64_OpARM64MSUB(v *Value) bool { for { a := v_0 x := v_1 - if v_2.Op != OpARM64MOVDconst || v_2.AuxInt != -1 { + if v_2.Op != OpARM64MOVDconst || auxIntToInt64(v_2.AuxInt) != -1 { break } v.reset(OpARM64ADD) @@ -13357,7 +13357,7 @@ func rewriteValueARM64_OpARM64MSUB(v *Value) bool { // result: a for { a := v_0 - if v_2.Op != OpARM64MOVDconst || v_2.AuxInt != 0 { + if v_2.Op != OpARM64MOVDconst || auxIntToInt64(v_2.AuxInt) != 0 { break } v.copyOf(a) @@ -13368,7 +13368,7 @@ func rewriteValueARM64_OpARM64MSUB(v *Value) bool { for { a := v_0 x := v_1 - if v_2.Op != OpARM64MOVDconst || v_2.AuxInt != 1 { + if v_2.Op != OpARM64MOVDconst || auxIntToInt64(v_2.AuxInt) != 1 { break } v.reset(OpARM64SUB) @@ -13384,12 +13384,12 @@ func rewriteValueARM64_OpARM64MSUB(v *Value) bool { if v_2.Op != OpARM64MOVDconst { break } - c := v_2.AuxInt + c := auxIntToInt64(v_2.AuxInt) if !(isPowerOfTwo(c)) { break } v.reset(OpARM64SUBshiftLL) - v.AuxInt = log2(c) + v.AuxInt = int64ToAuxInt(log2(c)) v.AddArg2(a, x) return true } @@ -13402,13 +13402,13 @@ func rewriteValueARM64_OpARM64MSUB(v *Value) bool { if v_2.Op != OpARM64MOVDconst { break } - c := v_2.AuxInt + c := auxIntToInt64(v_2.AuxInt) if !(isPowerOfTwo(c-1) && c >= 3) { break } v.reset(OpARM64SUB) v0 := b.NewValue0(v.Pos, OpARM64ADDshiftLL, x.Type) - v0.AuxInt = log2(c - 1) + v0.AuxInt = int64ToAuxInt(log2(c - 1)) v0.AddArg2(x, x) v.AddArg2(a, v0) return true @@ -13422,13 +13422,13 @@ func rewriteValueARM64_OpARM64MSUB(v *Value) bool { if v_2.Op != OpARM64MOVDconst { break } - c := v_2.AuxInt + c := auxIntToInt64(v_2.AuxInt) if !(isPowerOfTwo(c+1) && c >= 7) { break } v.reset(OpARM64ADD) v0 := b.NewValue0(v.Pos, OpARM64SUBshiftLL, x.Type) - v0.AuxInt = log2(c + 1) + v0.AuxInt = int64ToAuxInt(log2(c + 1)) v0.AddArg2(x, x) v.AddArg2(a, v0) return true @@ -13442,14 +13442,14 @@ func rewriteValueARM64_OpARM64MSUB(v *Value) bool { if v_2.Op != 
OpARM64MOVDconst { break } - c := v_2.AuxInt + c := auxIntToInt64(v_2.AuxInt) if !(c%3 == 0 && isPowerOfTwo(c/3)) { break } v.reset(OpARM64ADDshiftLL) - v.AuxInt = log2(c / 3) + v.AuxInt = int64ToAuxInt(log2(c / 3)) v0 := b.NewValue0(v.Pos, OpARM64SUBshiftLL, x.Type) - v0.AuxInt = 2 + v0.AuxInt = int64ToAuxInt(2) v0.AddArg2(x, x) v.AddArg2(a, v0) return true @@ -13463,14 +13463,14 @@ func rewriteValueARM64_OpARM64MSUB(v *Value) bool { if v_2.Op != OpARM64MOVDconst { break } - c := v_2.AuxInt + c := auxIntToInt64(v_2.AuxInt) if !(c%5 == 0 && isPowerOfTwo(c/5)) { break } v.reset(OpARM64SUBshiftLL) - v.AuxInt = log2(c / 5) + v.AuxInt = int64ToAuxInt(log2(c / 5)) v0 := b.NewValue0(v.Pos, OpARM64ADDshiftLL, x.Type) - v0.AuxInt = 2 + v0.AuxInt = int64ToAuxInt(2) v0.AddArg2(x, x) v.AddArg2(a, v0) return true @@ -13484,14 +13484,14 @@ func rewriteValueARM64_OpARM64MSUB(v *Value) bool { if v_2.Op != OpARM64MOVDconst { break } - c := v_2.AuxInt + c := auxIntToInt64(v_2.AuxInt) if !(c%7 == 0 && isPowerOfTwo(c/7)) { break } v.reset(OpARM64ADDshiftLL) - v.AuxInt = log2(c / 7) + v.AuxInt = int64ToAuxInt(log2(c / 7)) v0 := b.NewValue0(v.Pos, OpARM64SUBshiftLL, x.Type) - v0.AuxInt = 3 + v0.AuxInt = int64ToAuxInt(3) v0.AddArg2(x, x) v.AddArg2(a, v0) return true @@ -13505,14 +13505,14 @@ func rewriteValueARM64_OpARM64MSUB(v *Value) bool { if v_2.Op != OpARM64MOVDconst { break } - c := v_2.AuxInt + c := auxIntToInt64(v_2.AuxInt) if !(c%9 == 0 && isPowerOfTwo(c/9)) { break } v.reset(OpARM64SUBshiftLL) - v.AuxInt = log2(c / 9) + v.AuxInt = int64ToAuxInt(log2(c / 9)) v0 := b.NewValue0(v.Pos, OpARM64ADDshiftLL, x.Type) - v0.AuxInt = 3 + v0.AuxInt = int64ToAuxInt(3) v0.AddArg2(x, x) v.AddArg2(a, v0) return true @@ -13521,7 +13521,7 @@ func rewriteValueARM64_OpARM64MSUB(v *Value) bool { // result: (ADD a x) for { a := v_0 - if v_1.Op != OpARM64MOVDconst || v_1.AuxInt != -1 { + if v_1.Op != OpARM64MOVDconst || auxIntToInt64(v_1.AuxInt) != -1 { break } x := v_2 @@ -13533,7 +13533,7 @@ func rewriteValueARM64_OpARM64MSUB(v *Value) bool { // result: a for { a := v_0 - if v_1.Op != OpARM64MOVDconst || v_1.AuxInt != 0 { + if v_1.Op != OpARM64MOVDconst || auxIntToInt64(v_1.AuxInt) != 0 { break } v.copyOf(a) @@ -13543,7 +13543,7 @@ func rewriteValueARM64_OpARM64MSUB(v *Value) bool { // result: (SUB a x) for { a := v_0 - if v_1.Op != OpARM64MOVDconst || v_1.AuxInt != 1 { + if v_1.Op != OpARM64MOVDconst || auxIntToInt64(v_1.AuxInt) != 1 { break } x := v_2 @@ -13559,13 +13559,13 @@ func rewriteValueARM64_OpARM64MSUB(v *Value) bool { if v_1.Op != OpARM64MOVDconst { break } - c := v_1.AuxInt + c := auxIntToInt64(v_1.AuxInt) x := v_2 if !(isPowerOfTwo(c)) { break } v.reset(OpARM64SUBshiftLL) - v.AuxInt = log2(c) + v.AuxInt = int64ToAuxInt(log2(c)) v.AddArg2(a, x) return true } @@ -13577,14 +13577,14 @@ func rewriteValueARM64_OpARM64MSUB(v *Value) bool { if v_1.Op != OpARM64MOVDconst { break } - c := v_1.AuxInt + c := auxIntToInt64(v_1.AuxInt) x := v_2 if !(isPowerOfTwo(c-1) && c >= 3) { break } v.reset(OpARM64SUB) v0 := b.NewValue0(v.Pos, OpARM64ADDshiftLL, x.Type) - v0.AuxInt = log2(c - 1) + v0.AuxInt = int64ToAuxInt(log2(c - 1)) v0.AddArg2(x, x) v.AddArg2(a, v0) return true @@ -13597,14 +13597,14 @@ func rewriteValueARM64_OpARM64MSUB(v *Value) bool { if v_1.Op != OpARM64MOVDconst { break } - c := v_1.AuxInt + c := auxIntToInt64(v_1.AuxInt) x := v_2 if !(isPowerOfTwo(c+1) && c >= 7) { break } v.reset(OpARM64ADD) v0 := b.NewValue0(v.Pos, OpARM64SUBshiftLL, x.Type) - v0.AuxInt = log2(c + 1) + v0.AuxInt = int64ToAuxInt(log2(c + 
1)) v0.AddArg2(x, x) v.AddArg2(a, v0) return true @@ -13617,15 +13617,15 @@ func rewriteValueARM64_OpARM64MSUB(v *Value) bool { if v_1.Op != OpARM64MOVDconst { break } - c := v_1.AuxInt + c := auxIntToInt64(v_1.AuxInt) x := v_2 if !(c%3 == 0 && isPowerOfTwo(c/3)) { break } v.reset(OpARM64ADDshiftLL) - v.AuxInt = log2(c / 3) + v.AuxInt = int64ToAuxInt(log2(c / 3)) v0 := b.NewValue0(v.Pos, OpARM64SUBshiftLL, x.Type) - v0.AuxInt = 2 + v0.AuxInt = int64ToAuxInt(2) v0.AddArg2(x, x) v.AddArg2(a, v0) return true @@ -13638,15 +13638,15 @@ func rewriteValueARM64_OpARM64MSUB(v *Value) bool { if v_1.Op != OpARM64MOVDconst { break } - c := v_1.AuxInt + c := auxIntToInt64(v_1.AuxInt) x := v_2 if !(c%5 == 0 && isPowerOfTwo(c/5)) { break } v.reset(OpARM64SUBshiftLL) - v.AuxInt = log2(c / 5) + v.AuxInt = int64ToAuxInt(log2(c / 5)) v0 := b.NewValue0(v.Pos, OpARM64ADDshiftLL, x.Type) - v0.AuxInt = 2 + v0.AuxInt = int64ToAuxInt(2) v0.AddArg2(x, x) v.AddArg2(a, v0) return true @@ -13659,15 +13659,15 @@ func rewriteValueARM64_OpARM64MSUB(v *Value) bool { if v_1.Op != OpARM64MOVDconst { break } - c := v_1.AuxInt + c := auxIntToInt64(v_1.AuxInt) x := v_2 if !(c%7 == 0 && isPowerOfTwo(c/7)) { break } v.reset(OpARM64ADDshiftLL) - v.AuxInt = log2(c / 7) + v.AuxInt = int64ToAuxInt(log2(c / 7)) v0 := b.NewValue0(v.Pos, OpARM64SUBshiftLL, x.Type) - v0.AuxInt = 3 + v0.AuxInt = int64ToAuxInt(3) v0.AddArg2(x, x) v.AddArg2(a, v0) return true @@ -13680,15 +13680,15 @@ func rewriteValueARM64_OpARM64MSUB(v *Value) bool { if v_1.Op != OpARM64MOVDconst { break } - c := v_1.AuxInt + c := auxIntToInt64(v_1.AuxInt) x := v_2 if !(c%9 == 0 && isPowerOfTwo(c/9)) { break } v.reset(OpARM64SUBshiftLL) - v.AuxInt = log2(c / 9) + v.AuxInt = int64ToAuxInt(log2(c / 9)) v0 := b.NewValue0(v.Pos, OpARM64ADDshiftLL, x.Type) - v0.AuxInt = 3 + v0.AuxInt = int64ToAuxInt(3) v0.AddArg2(x, x) v.AddArg2(a, v0) return true @@ -13699,11 +13699,11 @@ func rewriteValueARM64_OpARM64MSUB(v *Value) bool { if v_0.Op != OpARM64MOVDconst { break } - c := v_0.AuxInt + c := auxIntToInt64(v_0.AuxInt) x := v_1 y := v_2 v.reset(OpARM64ADDconst) - v.AuxInt = c + v.AuxInt = int64ToAuxInt(c) v0 := b.NewValue0(v.Pos, OpARM64MNEG, x.Type) v0.AddArg2(x, y) v.AddArg(v0) @@ -13716,13 +13716,13 @@ func rewriteValueARM64_OpARM64MSUB(v *Value) bool { if v_1.Op != OpARM64MOVDconst { break } - c := v_1.AuxInt + c := auxIntToInt64(v_1.AuxInt) if v_2.Op != OpARM64MOVDconst { break } - d := v_2.AuxInt + d := auxIntToInt64(v_2.AuxInt) v.reset(OpARM64SUBconst) - v.AuxInt = c * d + v.AuxInt = int64ToAuxInt(c * d) v.AddArg(a) return true } @@ -13742,7 +13742,7 @@ func rewriteValueARM64_OpARM64MSUBW(v *Value) bool { if v_2.Op != OpARM64MOVDconst { break } - c := v_2.AuxInt + c := auxIntToInt64(v_2.AuxInt) if !(int32(c) == -1) { break } @@ -13758,7 +13758,7 @@ func rewriteValueARM64_OpARM64MSUBW(v *Value) bool { if v_2.Op != OpARM64MOVDconst { break } - c := v_2.AuxInt + c := auxIntToInt64(v_2.AuxInt) if !(int32(c) == 0) { break } @@ -13774,7 +13774,7 @@ func rewriteValueARM64_OpARM64MSUBW(v *Value) bool { if v_2.Op != OpARM64MOVDconst { break } - c := v_2.AuxInt + c := auxIntToInt64(v_2.AuxInt) if !(int32(c) == 1) { break } @@ -13791,12 +13791,12 @@ func rewriteValueARM64_OpARM64MSUBW(v *Value) bool { if v_2.Op != OpARM64MOVDconst { break } - c := v_2.AuxInt + c := auxIntToInt64(v_2.AuxInt) if !(isPowerOfTwo(c)) { break } v.reset(OpARM64SUBshiftLL) - v.AuxInt = log2(c) + v.AuxInt = int64ToAuxInt(log2(c)) v.AddArg2(a, x) return true } @@ -13809,13 +13809,13 @@ func 
rewriteValueARM64_OpARM64MSUBW(v *Value) bool { if v_2.Op != OpARM64MOVDconst { break } - c := v_2.AuxInt + c := auxIntToInt64(v_2.AuxInt) if !(isPowerOfTwo(c-1) && int32(c) >= 3) { break } v.reset(OpARM64SUB) v0 := b.NewValue0(v.Pos, OpARM64ADDshiftLL, x.Type) - v0.AuxInt = log2(c - 1) + v0.AuxInt = int64ToAuxInt(log2(c - 1)) v0.AddArg2(x, x) v.AddArg2(a, v0) return true @@ -13829,13 +13829,13 @@ func rewriteValueARM64_OpARM64MSUBW(v *Value) bool { if v_2.Op != OpARM64MOVDconst { break } - c := v_2.AuxInt + c := auxIntToInt64(v_2.AuxInt) if !(isPowerOfTwo(c+1) && int32(c) >= 7) { break } v.reset(OpARM64ADD) v0 := b.NewValue0(v.Pos, OpARM64SUBshiftLL, x.Type) - v0.AuxInt = log2(c + 1) + v0.AuxInt = int64ToAuxInt(log2(c + 1)) v0.AddArg2(x, x) v.AddArg2(a, v0) return true @@ -13849,14 +13849,14 @@ func rewriteValueARM64_OpARM64MSUBW(v *Value) bool { if v_2.Op != OpARM64MOVDconst { break } - c := v_2.AuxInt + c := auxIntToInt64(v_2.AuxInt) if !(c%3 == 0 && isPowerOfTwo(c/3) && is32Bit(c)) { break } v.reset(OpARM64ADDshiftLL) - v.AuxInt = log2(c / 3) + v.AuxInt = int64ToAuxInt(log2(c / 3)) v0 := b.NewValue0(v.Pos, OpARM64SUBshiftLL, x.Type) - v0.AuxInt = 2 + v0.AuxInt = int64ToAuxInt(2) v0.AddArg2(x, x) v.AddArg2(a, v0) return true @@ -13870,14 +13870,14 @@ func rewriteValueARM64_OpARM64MSUBW(v *Value) bool { if v_2.Op != OpARM64MOVDconst { break } - c := v_2.AuxInt + c := auxIntToInt64(v_2.AuxInt) if !(c%5 == 0 && isPowerOfTwo(c/5) && is32Bit(c)) { break } v.reset(OpARM64SUBshiftLL) - v.AuxInt = log2(c / 5) + v.AuxInt = int64ToAuxInt(log2(c / 5)) v0 := b.NewValue0(v.Pos, OpARM64ADDshiftLL, x.Type) - v0.AuxInt = 2 + v0.AuxInt = int64ToAuxInt(2) v0.AddArg2(x, x) v.AddArg2(a, v0) return true @@ -13891,14 +13891,14 @@ func rewriteValueARM64_OpARM64MSUBW(v *Value) bool { if v_2.Op != OpARM64MOVDconst { break } - c := v_2.AuxInt + c := auxIntToInt64(v_2.AuxInt) if !(c%7 == 0 && isPowerOfTwo(c/7) && is32Bit(c)) { break } v.reset(OpARM64ADDshiftLL) - v.AuxInt = log2(c / 7) + v.AuxInt = int64ToAuxInt(log2(c / 7)) v0 := b.NewValue0(v.Pos, OpARM64SUBshiftLL, x.Type) - v0.AuxInt = 3 + v0.AuxInt = int64ToAuxInt(3) v0.AddArg2(x, x) v.AddArg2(a, v0) return true @@ -13912,14 +13912,14 @@ func rewriteValueARM64_OpARM64MSUBW(v *Value) bool { if v_2.Op != OpARM64MOVDconst { break } - c := v_2.AuxInt + c := auxIntToInt64(v_2.AuxInt) if !(c%9 == 0 && isPowerOfTwo(c/9) && is32Bit(c)) { break } v.reset(OpARM64SUBshiftLL) - v.AuxInt = log2(c / 9) + v.AuxInt = int64ToAuxInt(log2(c / 9)) v0 := b.NewValue0(v.Pos, OpARM64ADDshiftLL, x.Type) - v0.AuxInt = 3 + v0.AuxInt = int64ToAuxInt(3) v0.AddArg2(x, x) v.AddArg2(a, v0) return true @@ -13932,7 +13932,7 @@ func rewriteValueARM64_OpARM64MSUBW(v *Value) bool { if v_1.Op != OpARM64MOVDconst { break } - c := v_1.AuxInt + c := auxIntToInt64(v_1.AuxInt) x := v_2 if !(int32(c) == -1) { break @@ -13949,7 +13949,7 @@ func rewriteValueARM64_OpARM64MSUBW(v *Value) bool { if v_1.Op != OpARM64MOVDconst { break } - c := v_1.AuxInt + c := auxIntToInt64(v_1.AuxInt) if !(int32(c) == 0) { break } @@ -13964,7 +13964,7 @@ func rewriteValueARM64_OpARM64MSUBW(v *Value) bool { if v_1.Op != OpARM64MOVDconst { break } - c := v_1.AuxInt + c := auxIntToInt64(v_1.AuxInt) x := v_2 if !(int32(c) == 1) { break @@ -13981,13 +13981,13 @@ func rewriteValueARM64_OpARM64MSUBW(v *Value) bool { if v_1.Op != OpARM64MOVDconst { break } - c := v_1.AuxInt + c := auxIntToInt64(v_1.AuxInt) x := v_2 if !(isPowerOfTwo(c)) { break } v.reset(OpARM64SUBshiftLL) - v.AuxInt = log2(c) + v.AuxInt = int64ToAuxInt(log2(c)) 
v.AddArg2(a, x) return true } @@ -13999,14 +13999,14 @@ func rewriteValueARM64_OpARM64MSUBW(v *Value) bool { if v_1.Op != OpARM64MOVDconst { break } - c := v_1.AuxInt + c := auxIntToInt64(v_1.AuxInt) x := v_2 if !(isPowerOfTwo(c-1) && int32(c) >= 3) { break } v.reset(OpARM64SUB) v0 := b.NewValue0(v.Pos, OpARM64ADDshiftLL, x.Type) - v0.AuxInt = log2(c - 1) + v0.AuxInt = int64ToAuxInt(log2(c - 1)) v0.AddArg2(x, x) v.AddArg2(a, v0) return true @@ -14019,14 +14019,14 @@ func rewriteValueARM64_OpARM64MSUBW(v *Value) bool { if v_1.Op != OpARM64MOVDconst { break } - c := v_1.AuxInt + c := auxIntToInt64(v_1.AuxInt) x := v_2 if !(isPowerOfTwo(c+1) && int32(c) >= 7) { break } v.reset(OpARM64ADD) v0 := b.NewValue0(v.Pos, OpARM64SUBshiftLL, x.Type) - v0.AuxInt = log2(c + 1) + v0.AuxInt = int64ToAuxInt(log2(c + 1)) v0.AddArg2(x, x) v.AddArg2(a, v0) return true @@ -14039,15 +14039,15 @@ func rewriteValueARM64_OpARM64MSUBW(v *Value) bool { if v_1.Op != OpARM64MOVDconst { break } - c := v_1.AuxInt + c := auxIntToInt64(v_1.AuxInt) x := v_2 if !(c%3 == 0 && isPowerOfTwo(c/3) && is32Bit(c)) { break } v.reset(OpARM64ADDshiftLL) - v.AuxInt = log2(c / 3) + v.AuxInt = int64ToAuxInt(log2(c / 3)) v0 := b.NewValue0(v.Pos, OpARM64SUBshiftLL, x.Type) - v0.AuxInt = 2 + v0.AuxInt = int64ToAuxInt(2) v0.AddArg2(x, x) v.AddArg2(a, v0) return true @@ -14060,15 +14060,15 @@ func rewriteValueARM64_OpARM64MSUBW(v *Value) bool { if v_1.Op != OpARM64MOVDconst { break } - c := v_1.AuxInt + c := auxIntToInt64(v_1.AuxInt) x := v_2 if !(c%5 == 0 && isPowerOfTwo(c/5) && is32Bit(c)) { break } v.reset(OpARM64SUBshiftLL) - v.AuxInt = log2(c / 5) + v.AuxInt = int64ToAuxInt(log2(c / 5)) v0 := b.NewValue0(v.Pos, OpARM64ADDshiftLL, x.Type) - v0.AuxInt = 2 + v0.AuxInt = int64ToAuxInt(2) v0.AddArg2(x, x) v.AddArg2(a, v0) return true @@ -14081,15 +14081,15 @@ func rewriteValueARM64_OpARM64MSUBW(v *Value) bool { if v_1.Op != OpARM64MOVDconst { break } - c := v_1.AuxInt + c := auxIntToInt64(v_1.AuxInt) x := v_2 if !(c%7 == 0 && isPowerOfTwo(c/7) && is32Bit(c)) { break } v.reset(OpARM64ADDshiftLL) - v.AuxInt = log2(c / 7) + v.AuxInt = int64ToAuxInt(log2(c / 7)) v0 := b.NewValue0(v.Pos, OpARM64SUBshiftLL, x.Type) - v0.AuxInt = 3 + v0.AuxInt = int64ToAuxInt(3) v0.AddArg2(x, x) v.AddArg2(a, v0) return true @@ -14102,15 +14102,15 @@ func rewriteValueARM64_OpARM64MSUBW(v *Value) bool { if v_1.Op != OpARM64MOVDconst { break } - c := v_1.AuxInt + c := auxIntToInt64(v_1.AuxInt) x := v_2 if !(c%9 == 0 && isPowerOfTwo(c/9) && is32Bit(c)) { break } v.reset(OpARM64SUBshiftLL) - v.AuxInt = log2(c / 9) + v.AuxInt = int64ToAuxInt(log2(c / 9)) v0 := b.NewValue0(v.Pos, OpARM64ADDshiftLL, x.Type) - v0.AuxInt = 3 + v0.AuxInt = int64ToAuxInt(3) v0.AddArg2(x, x) v.AddArg2(a, v0) return true @@ -14121,11 +14121,11 @@ func rewriteValueARM64_OpARM64MSUBW(v *Value) bool { if v_0.Op != OpARM64MOVDconst { break } - c := v_0.AuxInt + c := auxIntToInt64(v_0.AuxInt) x := v_1 y := v_2 v.reset(OpARM64ADDconst) - v.AuxInt = c + v.AuxInt = int64ToAuxInt(c) v0 := b.NewValue0(v.Pos, OpARM64MNEGW, x.Type) v0.AddArg2(x, y) v.AddArg(v0) @@ -14138,13 +14138,13 @@ func rewriteValueARM64_OpARM64MSUBW(v *Value) bool { if v_1.Op != OpARM64MOVDconst { break } - c := v_1.AuxInt + c := auxIntToInt64(v_1.AuxInt) if v_2.Op != OpARM64MOVDconst { break } - d := v_2.AuxInt + d := auxIntToInt64(v_2.AuxInt) v.reset(OpARM64SUBconst) - v.AuxInt = int64(int32(c) * int32(d)) + v.AuxInt = int64ToAuxInt(int64(int32(c) * int32(d))) v.AddArg(a) return true } @@ -14174,7 +14174,7 @@ func 
rewriteValueARM64_OpARM64MUL(v *Value) bool { for { for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { x := v_0 - if v_1.Op != OpARM64MOVDconst || v_1.AuxInt != -1 { + if v_1.Op != OpARM64MOVDconst || auxIntToInt64(v_1.AuxInt) != -1 { continue } v.reset(OpARM64NEG) @@ -14187,11 +14187,11 @@ func rewriteValueARM64_OpARM64MUL(v *Value) bool { // result: (MOVDconst [0]) for { for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { - if v_1.Op != OpARM64MOVDconst || v_1.AuxInt != 0 { + if v_1.Op != OpARM64MOVDconst || auxIntToInt64(v_1.AuxInt) != 0 { continue } v.reset(OpARM64MOVDconst) - v.AuxInt = 0 + v.AuxInt = int64ToAuxInt(0) return true } break @@ -14201,7 +14201,7 @@ func rewriteValueARM64_OpARM64MUL(v *Value) bool { for { for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { x := v_0 - if v_1.Op != OpARM64MOVDconst || v_1.AuxInt != 1 { + if v_1.Op != OpARM64MOVDconst || auxIntToInt64(v_1.AuxInt) != 1 { continue } v.copyOf(x) @@ -14218,12 +14218,12 @@ func rewriteValueARM64_OpARM64MUL(v *Value) bool { if v_1.Op != OpARM64MOVDconst { continue } - c := v_1.AuxInt + c := auxIntToInt64(v_1.AuxInt) if !(isPowerOfTwo(c)) { continue } v.reset(OpARM64SLLconst) - v.AuxInt = log2(c) + v.AuxInt = int64ToAuxInt(log2(c)) v.AddArg(x) return true } @@ -14238,12 +14238,12 @@ func rewriteValueARM64_OpARM64MUL(v *Value) bool { if v_1.Op != OpARM64MOVDconst { continue } - c := v_1.AuxInt + c := auxIntToInt64(v_1.AuxInt) if !(isPowerOfTwo(c-1) && c >= 3) { continue } v.reset(OpARM64ADDshiftLL) - v.AuxInt = log2(c - 1) + v.AuxInt = int64ToAuxInt(log2(c - 1)) v.AddArg2(x, x) return true } @@ -14258,12 +14258,12 @@ func rewriteValueARM64_OpARM64MUL(v *Value) bool { if v_1.Op != OpARM64MOVDconst { continue } - c := v_1.AuxInt + c := auxIntToInt64(v_1.AuxInt) if !(isPowerOfTwo(c+1) && c >= 7) { continue } v.reset(OpARM64ADDshiftLL) - v.AuxInt = log2(c + 1) + v.AuxInt = int64ToAuxInt(log2(c + 1)) v0 := b.NewValue0(v.Pos, OpARM64NEG, x.Type) v0.AddArg(x) v.AddArg2(v0, x) @@ -14280,14 +14280,14 @@ func rewriteValueARM64_OpARM64MUL(v *Value) bool { if v_1.Op != OpARM64MOVDconst { continue } - c := v_1.AuxInt + c := auxIntToInt64(v_1.AuxInt) if !(c%3 == 0 && isPowerOfTwo(c/3)) { continue } v.reset(OpARM64SLLconst) - v.AuxInt = log2(c / 3) + v.AuxInt = int64ToAuxInt(log2(c / 3)) v0 := b.NewValue0(v.Pos, OpARM64ADDshiftLL, x.Type) - v0.AuxInt = 1 + v0.AuxInt = int64ToAuxInt(1) v0.AddArg2(x, x) v.AddArg(v0) return true @@ -14303,14 +14303,14 @@ func rewriteValueARM64_OpARM64MUL(v *Value) bool { if v_1.Op != OpARM64MOVDconst { continue } - c := v_1.AuxInt + c := auxIntToInt64(v_1.AuxInt) if !(c%5 == 0 && isPowerOfTwo(c/5)) { continue } v.reset(OpARM64SLLconst) - v.AuxInt = log2(c / 5) + v.AuxInt = int64ToAuxInt(log2(c / 5)) v0 := b.NewValue0(v.Pos, OpARM64ADDshiftLL, x.Type) - v0.AuxInt = 2 + v0.AuxInt = int64ToAuxInt(2) v0.AddArg2(x, x) v.AddArg(v0) return true @@ -14326,14 +14326,14 @@ func rewriteValueARM64_OpARM64MUL(v *Value) bool { if v_1.Op != OpARM64MOVDconst { continue } - c := v_1.AuxInt + c := auxIntToInt64(v_1.AuxInt) if !(c%7 == 0 && isPowerOfTwo(c/7)) { continue } v.reset(OpARM64SLLconst) - v.AuxInt = log2(c / 7) + v.AuxInt = int64ToAuxInt(log2(c / 7)) v0 := b.NewValue0(v.Pos, OpARM64ADDshiftLL, x.Type) - v0.AuxInt = 3 + v0.AuxInt = int64ToAuxInt(3) v1 := b.NewValue0(v.Pos, OpARM64NEG, x.Type) v1.AddArg(x) v0.AddArg2(v1, x) @@ -14351,14 +14351,14 @@ func rewriteValueARM64_OpARM64MUL(v *Value) bool { if v_1.Op != OpARM64MOVDconst { continue } - c := v_1.AuxInt + c := auxIntToInt64(v_1.AuxInt) 
if !(c%9 == 0 && isPowerOfTwo(c/9)) { continue } v.reset(OpARM64SLLconst) - v.AuxInt = log2(c / 9) + v.AuxInt = int64ToAuxInt(log2(c / 9)) v0 := b.NewValue0(v.Pos, OpARM64ADDshiftLL, x.Type) - v0.AuxInt = 3 + v0.AuxInt = int64ToAuxInt(3) v0.AddArg2(x, x) v.AddArg(v0) return true @@ -14372,13 +14372,13 @@ func rewriteValueARM64_OpARM64MUL(v *Value) bool { if v_0.Op != OpARM64MOVDconst { continue } - c := v_0.AuxInt + c := auxIntToInt64(v_0.AuxInt) if v_1.Op != OpARM64MOVDconst { continue } - d := v_1.AuxInt + d := auxIntToInt64(v_1.AuxInt) v.reset(OpARM64MOVDconst) - v.AuxInt = c * d + v.AuxInt = int64ToAuxInt(c * d) return true } break @@ -14413,7 +14413,7 @@ func rewriteValueARM64_OpARM64MULW(v *Value) bool { if v_1.Op != OpARM64MOVDconst { continue } - c := v_1.AuxInt + c := auxIntToInt64(v_1.AuxInt) if !(int32(c) == -1) { continue } @@ -14431,12 +14431,12 @@ func rewriteValueARM64_OpARM64MULW(v *Value) bool { if v_1.Op != OpARM64MOVDconst { continue } - c := v_1.AuxInt + c := auxIntToInt64(v_1.AuxInt) if !(int32(c) == 0) { continue } v.reset(OpARM64MOVDconst) - v.AuxInt = 0 + v.AuxInt = int64ToAuxInt(0) return true } break @@ -14450,7 +14450,7 @@ func rewriteValueARM64_OpARM64MULW(v *Value) bool { if v_1.Op != OpARM64MOVDconst { continue } - c := v_1.AuxInt + c := auxIntToInt64(v_1.AuxInt) if !(int32(c) == 1) { continue } @@ -14468,12 +14468,12 @@ func rewriteValueARM64_OpARM64MULW(v *Value) bool { if v_1.Op != OpARM64MOVDconst { continue } - c := v_1.AuxInt + c := auxIntToInt64(v_1.AuxInt) if !(isPowerOfTwo(c)) { continue } v.reset(OpARM64SLLconst) - v.AuxInt = log2(c) + v.AuxInt = int64ToAuxInt(log2(c)) v.AddArg(x) return true } @@ -14488,12 +14488,12 @@ func rewriteValueARM64_OpARM64MULW(v *Value) bool { if v_1.Op != OpARM64MOVDconst { continue } - c := v_1.AuxInt + c := auxIntToInt64(v_1.AuxInt) if !(isPowerOfTwo(c-1) && int32(c) >= 3) { continue } v.reset(OpARM64ADDshiftLL) - v.AuxInt = log2(c - 1) + v.AuxInt = int64ToAuxInt(log2(c - 1)) v.AddArg2(x, x) return true } @@ -14508,12 +14508,12 @@ func rewriteValueARM64_OpARM64MULW(v *Value) bool { if v_1.Op != OpARM64MOVDconst { continue } - c := v_1.AuxInt + c := auxIntToInt64(v_1.AuxInt) if !(isPowerOfTwo(c+1) && int32(c) >= 7) { continue } v.reset(OpARM64ADDshiftLL) - v.AuxInt = log2(c + 1) + v.AuxInt = int64ToAuxInt(log2(c + 1)) v0 := b.NewValue0(v.Pos, OpARM64NEG, x.Type) v0.AddArg(x) v.AddArg2(v0, x) @@ -14530,14 +14530,14 @@ func rewriteValueARM64_OpARM64MULW(v *Value) bool { if v_1.Op != OpARM64MOVDconst { continue } - c := v_1.AuxInt + c := auxIntToInt64(v_1.AuxInt) if !(c%3 == 0 && isPowerOfTwo(c/3) && is32Bit(c)) { continue } v.reset(OpARM64SLLconst) - v.AuxInt = log2(c / 3) + v.AuxInt = int64ToAuxInt(log2(c / 3)) v0 := b.NewValue0(v.Pos, OpARM64ADDshiftLL, x.Type) - v0.AuxInt = 1 + v0.AuxInt = int64ToAuxInt(1) v0.AddArg2(x, x) v.AddArg(v0) return true @@ -14553,14 +14553,14 @@ func rewriteValueARM64_OpARM64MULW(v *Value) bool { if v_1.Op != OpARM64MOVDconst { continue } - c := v_1.AuxInt + c := auxIntToInt64(v_1.AuxInt) if !(c%5 == 0 && isPowerOfTwo(c/5) && is32Bit(c)) { continue } v.reset(OpARM64SLLconst) - v.AuxInt = log2(c / 5) + v.AuxInt = int64ToAuxInt(log2(c / 5)) v0 := b.NewValue0(v.Pos, OpARM64ADDshiftLL, x.Type) - v0.AuxInt = 2 + v0.AuxInt = int64ToAuxInt(2) v0.AddArg2(x, x) v.AddArg(v0) return true @@ -14576,14 +14576,14 @@ func rewriteValueARM64_OpARM64MULW(v *Value) bool { if v_1.Op != OpARM64MOVDconst { continue } - c := v_1.AuxInt + c := auxIntToInt64(v_1.AuxInt) if !(c%7 == 0 && isPowerOfTwo(c/7) && 
is32Bit(c)) { continue } v.reset(OpARM64SLLconst) - v.AuxInt = log2(c / 7) + v.AuxInt = int64ToAuxInt(log2(c / 7)) v0 := b.NewValue0(v.Pos, OpARM64ADDshiftLL, x.Type) - v0.AuxInt = 3 + v0.AuxInt = int64ToAuxInt(3) v1 := b.NewValue0(v.Pos, OpARM64NEG, x.Type) v1.AddArg(x) v0.AddArg2(v1, x) @@ -14601,14 +14601,14 @@ func rewriteValueARM64_OpARM64MULW(v *Value) bool { if v_1.Op != OpARM64MOVDconst { continue } - c := v_1.AuxInt + c := auxIntToInt64(v_1.AuxInt) if !(c%9 == 0 && isPowerOfTwo(c/9) && is32Bit(c)) { continue } v.reset(OpARM64SLLconst) - v.AuxInt = log2(c / 9) + v.AuxInt = int64ToAuxInt(log2(c / 9)) v0 := b.NewValue0(v.Pos, OpARM64ADDshiftLL, x.Type) - v0.AuxInt = 3 + v0.AuxInt = int64ToAuxInt(3) v0.AddArg2(x, x) v.AddArg(v0) return true @@ -14622,13 +14622,13 @@ func rewriteValueARM64_OpARM64MULW(v *Value) bool { if v_0.Op != OpARM64MOVDconst { continue } - c := v_0.AuxInt + c := auxIntToInt64(v_0.AuxInt) if v_1.Op != OpARM64MOVDconst { continue } - d := v_1.AuxInt + d := auxIntToInt64(v_1.AuxInt) v.reset(OpARM64MOVDconst) - v.AuxInt = int64(int32(c) * int32(d)) + v.AuxInt = int64ToAuxInt(int64(int32(c) * int32(d))) return true } break @@ -14655,9 +14655,9 @@ func rewriteValueARM64_OpARM64MVN(v *Value) bool { if v_0.Op != OpARM64MOVDconst { break } - c := v_0.AuxInt + c := auxIntToInt64(v_0.AuxInt) v.reset(OpARM64MOVDconst) - v.AuxInt = ^c + v.AuxInt = int64ToAuxInt(^c) return true } // match: (MVN x:(SLLconst [c] y)) @@ -14796,9 +14796,9 @@ func rewriteValueARM64_OpARM64NEG(v *Value) bool { if v_0.Op != OpARM64MOVDconst { break } - c := v_0.AuxInt + c := auxIntToInt64(v_0.AuxInt) v.reset(OpARM64MOVDconst) - v.AuxInt = -c + v.AuxInt = int64ToAuxInt(-c) return true } // match: (NEG x:(SLLconst [c] y)) @@ -14944,9 +14944,9 @@ func rewriteValueARM64_OpARM64OR(v *Value) bool { if v_1.Op != OpARM64MOVDconst { continue } - c := v_1.AuxInt + c := auxIntToInt64(v_1.AuxInt) v.reset(OpARM64ORconst) - v.AuxInt = c + v.AuxInt = int64ToAuxInt(c) v.AddArg(x) return true } @@ -16938,9 +16938,9 @@ func rewriteValueARM64_OpARM64ORN(v *Value) bool { if v_1.Op != OpARM64MOVDconst { break } - c := v_1.AuxInt + c := auxIntToInt64(v_1.AuxInt) v.reset(OpARM64ORconst) - v.AuxInt = ^c + v.AuxInt = int64ToAuxInt(^c) v.AddArg(x) return true } @@ -16952,7 +16952,7 @@ func rewriteValueARM64_OpARM64ORN(v *Value) bool { break } v.reset(OpARM64MOVDconst) - v.AuxInt = -1 + v.AuxInt = int64ToAuxInt(-1) return true } // match: (ORN x0 x1:(SLLconst [c] y)) @@ -17127,7 +17127,7 @@ func rewriteValueARM64_OpARM64ORconst(v *Value) bool { // match: (ORconst [0] x) // result: x for { - if v.AuxInt != 0 { + if auxIntToInt64(v.AuxInt) != 0 { break } x := v_0 @@ -17137,36 +17137,36 @@ func rewriteValueARM64_OpARM64ORconst(v *Value) bool { // match: (ORconst [-1] _) // result: (MOVDconst [-1]) for { - if v.AuxInt != -1 { + if auxIntToInt64(v.AuxInt) != -1 { break } v.reset(OpARM64MOVDconst) - v.AuxInt = -1 + v.AuxInt = int64ToAuxInt(-1) return true } // match: (ORconst [c] (MOVDconst [d])) // result: (MOVDconst [c|d]) for { - c := v.AuxInt + c := auxIntToInt64(v.AuxInt) if v_0.Op != OpARM64MOVDconst { break } - d := v_0.AuxInt + d := auxIntToInt64(v_0.AuxInt) v.reset(OpARM64MOVDconst) - v.AuxInt = c | d + v.AuxInt = int64ToAuxInt(c | d) return true } // match: (ORconst [c] (ORconst [d] x)) // result: (ORconst [c|d] x) for { - c := v.AuxInt + c := auxIntToInt64(v.AuxInt) if v_0.Op != OpARM64ORconst { break } - d := v_0.AuxInt + d := auxIntToInt64(v_0.AuxInt) x := v_0.Args[0] v.reset(OpARM64ORconst) - v.AuxInt = c | d + 
v.AuxInt = int64ToAuxInt(c | d) v.AddArg(x) return true } @@ -19064,7 +19064,7 @@ func rewriteValueARM64_OpARM64SBCSflags(v *Value) bool { break } v_2_0_0 := v_2_0.Args[0] - if v_2_0_0.Op != OpARM64MOVDconst || v_2_0_0.AuxInt != 0 { + if v_2_0_0.Op != OpARM64MOVDconst || auxIntToInt64(v_2_0_0.AuxInt) != 0 { break } v.reset(OpARM64SUBSflags) @@ -19083,9 +19083,9 @@ func rewriteValueARM64_OpARM64SLL(v *Value) bool { if v_1.Op != OpARM64MOVDconst { break } - c := v_1.AuxInt + c := auxIntToInt64(v_1.AuxInt) v.reset(OpARM64SLLconst) - v.AuxInt = c & 63 + v.AuxInt = int64ToAuxInt(c & 63) v.AddArg(x) return true } @@ -19096,13 +19096,13 @@ func rewriteValueARM64_OpARM64SLLconst(v *Value) bool { // match: (SLLconst [c] (MOVDconst [d])) // result: (MOVDconst [d<>uint64(c)]) for { - c := v.AuxInt + c := auxIntToInt64(v.AuxInt) if v_0.Op != OpARM64MOVDconst { break } - d := v_0.AuxInt + d := auxIntToInt64(v_0.AuxInt) v.reset(OpARM64MOVDconst) - v.AuxInt = d >> uint64(c) + v.AuxInt = int64ToAuxInt(d >> uint64(c)) return true } // match: (SRAconst [rc] (SLLconst [lc] x)) @@ -19378,9 +19378,9 @@ func rewriteValueARM64_OpARM64SRL(v *Value) bool { if v_1.Op != OpARM64MOVDconst { break } - c := v_1.AuxInt + c := auxIntToInt64(v_1.AuxInt) v.reset(OpARM64SRLconst) - v.AuxInt = c & 63 + v.AuxInt = int64ToAuxInt(c & 63) v.AddArg(x) return true } @@ -19391,13 +19391,13 @@ func rewriteValueARM64_OpARM64SRLconst(v *Value) bool { // match: (SRLconst [c] (MOVDconst [d])) // result: (MOVDconst [int64(uint64(d)>>uint64(c))]) for { - c := v.AuxInt + c := auxIntToInt64(v.AuxInt) if v_0.Op != OpARM64MOVDconst { break } - d := v_0.AuxInt + d := auxIntToInt64(v_0.AuxInt) v.reset(OpARM64MOVDconst) - v.AuxInt = int64(uint64(d) >> uint64(c)) + v.AuxInt = int64ToAuxInt(int64(uint64(d) >> uint64(c))) return true } // match: (SRLconst [c] (SLLconst [c] x)) @@ -19679,9 +19679,9 @@ func rewriteValueARM64_OpARM64SUB(v *Value) bool { if v_1.Op != OpARM64MOVDconst { break } - c := v_1.AuxInt + c := auxIntToInt64(v_1.AuxInt) v.reset(OpARM64SUBconst) - v.AuxInt = c + v.AuxInt = int64ToAuxInt(c) v.AddArg(x) return true } @@ -19765,7 +19765,7 @@ func rewriteValueARM64_OpARM64SUB(v *Value) bool { break } v.reset(OpARM64MOVDconst) - v.AuxInt = 0 + v.AuxInt = int64ToAuxInt(0) return true } // match: (SUB x (SUB y z)) @@ -19862,7 +19862,7 @@ func rewriteValueARM64_OpARM64SUBconst(v *Value) bool { // match: (SUBconst [0] x) // result: x for { - if v.AuxInt != 0 { + if auxIntToInt64(v.AuxInt) != 0 { break } x := v_0 @@ -19872,40 +19872,40 @@ func rewriteValueARM64_OpARM64SUBconst(v *Value) bool { // match: (SUBconst [c] (MOVDconst [d])) // result: (MOVDconst [d-c]) for { - c := v.AuxInt + c := auxIntToInt64(v.AuxInt) if v_0.Op != OpARM64MOVDconst { break } - d := v_0.AuxInt + d := auxIntToInt64(v_0.AuxInt) v.reset(OpARM64MOVDconst) - v.AuxInt = d - c + v.AuxInt = int64ToAuxInt(d - c) return true } // match: (SUBconst [c] (SUBconst [d] x)) // result: (ADDconst [-c-d] x) for { - c := v.AuxInt + c := auxIntToInt64(v.AuxInt) if v_0.Op != OpARM64SUBconst { break } - d := v_0.AuxInt + d := auxIntToInt64(v_0.AuxInt) x := v_0.Args[0] v.reset(OpARM64ADDconst) - v.AuxInt = -c - d + v.AuxInt = int64ToAuxInt(-c - d) v.AddArg(x) return true } // match: (SUBconst [c] (ADDconst [d] x)) // result: (ADDconst [-c+d] x) for { - c := v.AuxInt + c := auxIntToInt64(v.AuxInt) if v_0.Op != OpARM64ADDconst { break } - d := v_0.AuxInt + d := auxIntToInt64(v_0.AuxInt) x := v_0.Args[0] v.reset(OpARM64ADDconst) - v.AuxInt = -c + d + v.AuxInt = int64ToAuxInt(-c + d) 
v.AddArg(x) return true } @@ -20030,9 +20030,9 @@ func rewriteValueARM64_OpARM64TST(v *Value) bool { if v_1.Op != OpARM64MOVDconst { continue } - c := v_1.AuxInt + c := auxIntToInt64(v_1.AuxInt) v.reset(OpARM64TSTconst) - v.AuxInt = c + v.AuxInt = int64ToAuxInt(c) v.AddArg(x) return true } @@ -20110,16 +20110,16 @@ func rewriteValueARM64_OpARM64TSTW(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] // match: (TSTW x (MOVDconst [c])) - // result: (TSTWconst [c] x) + // result: (TSTWconst [int32(c)] x) for { for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { x := v_0 if v_1.Op != OpARM64MOVDconst { continue } - c := v_1.AuxInt + c := auxIntToInt64(v_1.AuxInt) v.reset(OpARM64TSTWconst) - v.AuxInt = c + v.AuxInt = int32ToAuxInt(int32(c)) v.AddArg(x) return true } @@ -20375,7 +20375,7 @@ func rewriteValueARM64_OpARM64UDIV(v *Value) bool { // result: x for { x := v_0 - if v_1.Op != OpARM64MOVDconst || v_1.AuxInt != 1 { + if v_1.Op != OpARM64MOVDconst || auxIntToInt64(v_1.AuxInt) != 1 { break } v.copyOf(x) @@ -20389,12 +20389,12 @@ func rewriteValueARM64_OpARM64UDIV(v *Value) bool { if v_1.Op != OpARM64MOVDconst { break } - c := v_1.AuxInt + c := auxIntToInt64(v_1.AuxInt) if !(isPowerOfTwo(c)) { break } v.reset(OpARM64SRLconst) - v.AuxInt = log2(c) + v.AuxInt = int64ToAuxInt(log2(c)) v.AddArg(x) return true } @@ -20404,13 +20404,13 @@ func rewriteValueARM64_OpARM64UDIV(v *Value) bool { if v_0.Op != OpARM64MOVDconst { break } - c := v_0.AuxInt + c := auxIntToInt64(v_0.AuxInt) if v_1.Op != OpARM64MOVDconst { break } - d := v_1.AuxInt + d := auxIntToInt64(v_1.AuxInt) v.reset(OpARM64MOVDconst) - v.AuxInt = int64(uint64(c) / uint64(d)) + v.AuxInt = int64ToAuxInt(int64(uint64(c) / uint64(d))) return true } return false @@ -20426,7 +20426,7 @@ func rewriteValueARM64_OpARM64UDIVW(v *Value) bool { if v_1.Op != OpARM64MOVDconst { break } - c := v_1.AuxInt + c := auxIntToInt64(v_1.AuxInt) if !(uint32(c) == 1) { break } @@ -20441,12 +20441,12 @@ func rewriteValueARM64_OpARM64UDIVW(v *Value) bool { if v_1.Op != OpARM64MOVDconst { break } - c := v_1.AuxInt + c := auxIntToInt64(v_1.AuxInt) if !(isPowerOfTwo(c) && is32Bit(c)) { break } v.reset(OpARM64SRLconst) - v.AuxInt = log2(c) + v.AuxInt = int64ToAuxInt(log2(c)) v.AddArg(x) return true } @@ -20456,13 +20456,13 @@ func rewriteValueARM64_OpARM64UDIVW(v *Value) bool { if v_0.Op != OpARM64MOVDconst { break } - c := v_0.AuxInt + c := auxIntToInt64(v_0.AuxInt) if v_1.Op != OpARM64MOVDconst { break } - d := v_1.AuxInt + d := auxIntToInt64(v_1.AuxInt) v.reset(OpARM64MOVDconst) - v.AuxInt = int64(uint32(c) / uint32(d)) + v.AuxInt = int64ToAuxInt(int64(uint32(c) / uint32(d))) return true } return false @@ -20490,11 +20490,11 @@ func rewriteValueARM64_OpARM64UMOD(v *Value) bool { // match: (UMOD _ (MOVDconst [1])) // result: (MOVDconst [0]) for { - if v_1.Op != OpARM64MOVDconst || v_1.AuxInt != 1 { + if v_1.Op != OpARM64MOVDconst || auxIntToInt64(v_1.AuxInt) != 1 { break } v.reset(OpARM64MOVDconst) - v.AuxInt = 0 + v.AuxInt = int64ToAuxInt(0) return true } // match: (UMOD x (MOVDconst [c])) @@ -20505,12 +20505,12 @@ func rewriteValueARM64_OpARM64UMOD(v *Value) bool { if v_1.Op != OpARM64MOVDconst { break } - c := v_1.AuxInt + c := auxIntToInt64(v_1.AuxInt) if !(isPowerOfTwo(c)) { break } v.reset(OpARM64ANDconst) - v.AuxInt = c - 1 + v.AuxInt = int64ToAuxInt(c - 1) v.AddArg(x) return true } @@ -20520,13 +20520,13 @@ func rewriteValueARM64_OpARM64UMOD(v *Value) bool { if v_0.Op != OpARM64MOVDconst { break } - c := v_0.AuxInt + c := auxIntToInt64(v_0.AuxInt) if 
v_1.Op != OpARM64MOVDconst { break } - d := v_1.AuxInt + d := auxIntToInt64(v_1.AuxInt) v.reset(OpARM64MOVDconst) - v.AuxInt = int64(uint64(c) % uint64(d)) + v.AuxInt = int64ToAuxInt(int64(uint64(c) % uint64(d))) return true } return false @@ -20558,12 +20558,12 @@ func rewriteValueARM64_OpARM64UMODW(v *Value) bool { if v_1.Op != OpARM64MOVDconst { break } - c := v_1.AuxInt + c := auxIntToInt64(v_1.AuxInt) if !(uint32(c) == 1) { break } v.reset(OpARM64MOVDconst) - v.AuxInt = 0 + v.AuxInt = int64ToAuxInt(0) return true } // match: (UMODW x (MOVDconst [c])) @@ -20574,12 +20574,12 @@ func rewriteValueARM64_OpARM64UMODW(v *Value) bool { if v_1.Op != OpARM64MOVDconst { break } - c := v_1.AuxInt + c := auxIntToInt64(v_1.AuxInt) if !(isPowerOfTwo(c) && is32Bit(c)) { break } v.reset(OpARM64ANDconst) - v.AuxInt = c - 1 + v.AuxInt = int64ToAuxInt(c - 1) v.AddArg(x) return true } @@ -20589,13 +20589,13 @@ func rewriteValueARM64_OpARM64UMODW(v *Value) bool { if v_0.Op != OpARM64MOVDconst { break } - c := v_0.AuxInt + c := auxIntToInt64(v_0.AuxInt) if v_1.Op != OpARM64MOVDconst { break } - d := v_1.AuxInt + d := auxIntToInt64(v_1.AuxInt) v.reset(OpARM64MOVDconst) - v.AuxInt = int64(uint32(c) % uint32(d)) + v.AuxInt = int64ToAuxInt(int64(uint32(c) % uint32(d))) return true } return false @@ -20613,9 +20613,9 @@ func rewriteValueARM64_OpARM64XOR(v *Value) bool { if v_1.Op != OpARM64MOVDconst { continue } - c := v_1.AuxInt + c := auxIntToInt64(v_1.AuxInt) v.reset(OpARM64XORconst) - v.AuxInt = c + v.AuxInt = int64ToAuxInt(c) v.AddArg(x) return true } @@ -20629,7 +20629,7 @@ func rewriteValueARM64_OpARM64XOR(v *Value) bool { break } v.reset(OpARM64MOVDconst) - v.AuxInt = 0 + v.AuxInt = int64ToAuxInt(0) return true } // match: (XOR x (MVN y)) @@ -21001,7 +21001,7 @@ func rewriteValueARM64_OpARM64XORconst(v *Value) bool { // match: (XORconst [0] x) // result: x for { - if v.AuxInt != 0 { + if auxIntToInt64(v.AuxInt) != 0 { break } x := v_0 @@ -21011,7 +21011,7 @@ func rewriteValueARM64_OpARM64XORconst(v *Value) bool { // match: (XORconst [-1] x) // result: (MVN x) for { - if v.AuxInt != -1 { + if auxIntToInt64(v.AuxInt) != -1 { break } x := v_0 @@ -21022,26 +21022,26 @@ func rewriteValueARM64_OpARM64XORconst(v *Value) bool { // match: (XORconst [c] (MOVDconst [d])) // result: (MOVDconst [c^d]) for { - c := v.AuxInt + c := auxIntToInt64(v.AuxInt) if v_0.Op != OpARM64MOVDconst { break } - d := v_0.AuxInt + d := auxIntToInt64(v_0.AuxInt) v.reset(OpARM64MOVDconst) - v.AuxInt = c ^ d + v.AuxInt = int64ToAuxInt(c ^ d) return true } // match: (XORconst [c] (XORconst [d] x)) // result: (XORconst [c^d] x) for { - c := v.AuxInt + c := auxIntToInt64(v.AuxInt) if v_0.Op != OpARM64XORconst { break } - d := v_0.AuxInt + d := auxIntToInt64(v_0.AuxInt) x := v_0.Args[0] v.reset(OpARM64XORconst) - v.AuxInt = c ^ d + v.AuxInt = int64ToAuxInt(c ^ d) v.AddArg(x) return true } -- cgit v1.3 From b4ef49e527787ec932d0b371bb24c3fc370b1e8d Mon Sep 17 00:00:00 2001 From: David Chase Date: Fri, 12 Jun 2020 13:48:26 -0400 Subject: cmd/compile: introduce special ssa Aux type for calls This is prerequisite to moving call expansion later into SSA, and probably a good idea anyway. Passes tests. This is the first minimal CL that does a 1-for-1 substitution of *ssa.AuxCall for *obj.LSym. Next step (next CL) is to make this change for all calls so that additional information can be stored in AuxCall. 
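For orientation while reading the hunks below, here is a minimal sketch of the
shape this change gives the new Aux type. It assumes only what the diff itself
shows (an exported Fn field holding the target *obj.LSym, with the declaration
added in ssa/op.go); the committed definition may differ in detail and is
expected to grow in later CLs.

	// Sketch only; the authoritative declaration is the one added to ssa/op.go.
	package ssa

	import "cmd/internal/obj"

	// AuxCall wraps a static call target so that later CLs can attach
	// additional call information to the same Aux value.
	type AuxCall struct {
		Fn *obj.LSym // call target; nil for closure and interface calls
	}

Producers now wrap the symbol instead of storing it directly, for example
s.newValue1A(ssa.OpStaticCall, types.TypeMem, &ssa.AuxCall{Fn: sym.Linksym()}, s.mem()),
and consumers unwrap it with a type assertion on *ssa.AuxCall, comparing
ac.Fn where they previously compared the *obj.LSym held in v.Aux.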
Change-Id: Ia3a7715648fd9fb1a176850767a726e6f5b959eb Reviewed-on: https://go-review.googlesource.com/c/go/+/237680 Trust: David Chase Run-TryBot: David Chase TryBot-Result: Go Bot Reviewed-by: Cherry Zhang --- src/cmd/compile/internal/gc/plive.go | 6 +-- src/cmd/compile/internal/gc/ssa.go | 40 ++++++++++------- src/cmd/compile/internal/ssa/check.go | 12 +++++ src/cmd/compile/internal/ssa/func_test.go | 7 +++ src/cmd/compile/internal/ssa/fuse_test.go | 4 +- src/cmd/compile/internal/ssa/gen/386Ops.go | 2 +- src/cmd/compile/internal/ssa/gen/AMD64Ops.go | 2 +- src/cmd/compile/internal/ssa/gen/ARM64Ops.go | 2 +- src/cmd/compile/internal/ssa/gen/ARMOps.go | 2 +- src/cmd/compile/internal/ssa/gen/MIPS64Ops.go | 2 +- src/cmd/compile/internal/ssa/gen/MIPSOps.go | 2 +- src/cmd/compile/internal/ssa/gen/PPC64Ops.go | 2 +- src/cmd/compile/internal/ssa/gen/RISCV64Ops.go | 6 +-- src/cmd/compile/internal/ssa/gen/S390XOps.go | 2 +- src/cmd/compile/internal/ssa/gen/WasmOps.go | 2 +- src/cmd/compile/internal/ssa/gen/generic.rules | 12 ++--- src/cmd/compile/internal/ssa/gen/genericOps.go | 6 +-- src/cmd/compile/internal/ssa/gen/rulegen.go | 10 ++++- src/cmd/compile/internal/ssa/loopreschedchecks.go | 2 +- src/cmd/compile/internal/ssa/op.go | 13 ++++++ src/cmd/compile/internal/ssa/opGen.go | 53 +++++++++-------------- src/cmd/compile/internal/ssa/regalloc_test.go | 12 ++--- src/cmd/compile/internal/ssa/rewrite.go | 33 +++++++------- src/cmd/compile/internal/ssa/rewritegeneric.go | 40 ++++++++--------- src/cmd/compile/internal/ssa/value.go | 4 +- src/cmd/compile/internal/ssa/writebarrier.go | 4 +- src/cmd/compile/internal/wasm/ssa.go | 5 ++- 27 files changed, 164 insertions(+), 123 deletions(-) (limited to 'src/cmd/compile/internal/ssa') diff --git a/src/cmd/compile/internal/gc/plive.go b/src/cmd/compile/internal/gc/plive.go index 8976ed657a..a9ea37701e 100644 --- a/src/cmd/compile/internal/gc/plive.go +++ b/src/cmd/compile/internal/gc/plive.go @@ -861,7 +861,7 @@ func (lv *Liveness) hasStackMap(v *ssa.Value) bool { // typedmemclr and typedmemmove are write barriers and // deeply non-preemptible. They are unsafe points and // hence should not have liveness maps. - if sym, _ := v.Aux.(*obj.LSym); sym == typedmemclr || sym == typedmemmove { + if sym, ok := v.Aux.(*ssa.AuxCall); ok && (sym.Fn == typedmemclr || sym.Fn == typedmemmove) { return false } return true @@ -1231,8 +1231,8 @@ func (lv *Liveness) showlive(v *ssa.Value, live bvec) { s := "live at " if v == nil { s += fmt.Sprintf("entry to %s:", lv.fn.funcname()) - } else if sym, ok := v.Aux.(*obj.LSym); ok { - fn := sym.Name + } else if sym, ok := v.Aux.(*ssa.AuxCall); ok && sym.Fn != nil { + fn := sym.Fn.Name if pos := strings.Index(fn, "."); pos >= 0 { fn = fn[pos+1:] } diff --git a/src/cmd/compile/internal/gc/ssa.go b/src/cmd/compile/internal/gc/ssa.go index 3bdb5b0b9f..0ee31bd9a3 100644 --- a/src/cmd/compile/internal/gc/ssa.go +++ b/src/cmd/compile/internal/gc/ssa.go @@ -805,6 +805,11 @@ func (s *state) newValue2(op ssa.Op, t *types.Type, arg0, arg1 *ssa.Value) *ssa. return s.curBlock.NewValue2(s.peekPos(), op, t, arg0, arg1) } +// newValue2A adds a new value with two arguments and an aux value to the current block. +func (s *state) newValue2A(op ssa.Op, t *types.Type, aux interface{}, arg0, arg1 *ssa.Value) *ssa.Value { + return s.curBlock.NewValue2A(s.peekPos(), op, t, aux, arg0, arg1) +} + // newValue2Apos adds a new value with two arguments and an aux value to the current block. 
// isStmt determines whether the created values may be a statement or not // (i.e., false means never, yes means maybe). @@ -4297,10 +4302,10 @@ func (s *state) openDeferExit() { v := s.load(r.closure.Type.Elem(), r.closure) s.maybeNilCheckClosure(v, callDefer) codeptr := s.rawLoad(types.Types[TUINTPTR], v) - call = s.newValue3(ssa.OpClosureCall, types.TypeMem, codeptr, v, s.mem()) + call = s.newValue3A(ssa.OpClosureCall, types.TypeMem, nil, codeptr, v, s.mem()) } else { // Do a static call if the original call was a static function or method - call = s.newValue1A(ssa.OpStaticCall, types.TypeMem, fn.Sym.Linksym(), s.mem()) + call = s.newValue1A(ssa.OpStaticCall, types.TypeMem, &ssa.AuxCall{Fn: fn.Sym.Linksym()}, s.mem()) } call.AuxInt = stksize s.vars[&memVar] = call @@ -4432,7 +4437,7 @@ func (s *state) call(n *Node, k callKind) *ssa.Value { // Call runtime.deferprocStack with pointer to _defer record. arg0 := s.constOffPtrSP(types.Types[TUINTPTR], Ctxt.FixedFrameSize()) s.store(types.Types[TUINTPTR], arg0, addr) - call = s.newValue1A(ssa.OpStaticCall, types.TypeMem, deferprocStack, s.mem()) + call = s.newValue1A(ssa.OpStaticCall, types.TypeMem, &ssa.AuxCall{Fn: deferprocStack}, s.mem()) if stksize < int64(Widthptr) { // We need room for both the call to deferprocStack and the call to // the deferred function. @@ -4477,9 +4482,9 @@ func (s *state) call(n *Node, k callKind) *ssa.Value { // call target switch { case k == callDefer: - call = s.newValue1A(ssa.OpStaticCall, types.TypeMem, deferproc, s.mem()) + call = s.newValue1A(ssa.OpStaticCall, types.TypeMem, &ssa.AuxCall{Fn: deferproc}, s.mem()) case k == callGo: - call = s.newValue1A(ssa.OpStaticCall, types.TypeMem, newproc, s.mem()) + call = s.newValue1A(ssa.OpStaticCall, types.TypeMem, &ssa.AuxCall{Fn: newproc}, s.mem()) case closure != nil: // rawLoad because loading the code pointer from a // closure is always safe, but IsSanitizerSafeAddr @@ -4487,11 +4492,11 @@ func (s *state) call(n *Node, k callKind) *ssa.Value { // critical that we not clobber any arguments already // stored onto the stack. codeptr = s.rawLoad(types.Types[TUINTPTR], closure) - call = s.newValue3(ssa.OpClosureCall, types.TypeMem, codeptr, closure, s.mem()) + call = s.newValue3A(ssa.OpClosureCall, types.TypeMem, nil, codeptr, closure, s.mem()) case codeptr != nil: - call = s.newValue2(ssa.OpInterCall, types.TypeMem, codeptr, s.mem()) + call = s.newValue2A(ssa.OpInterCall, types.TypeMem, nil, codeptr, s.mem()) case sym != nil: - call = s.newValue1A(ssa.OpStaticCall, types.TypeMem, sym.Linksym(), s.mem()) + call = s.newValue1A(ssa.OpStaticCall, types.TypeMem, &ssa.AuxCall{Fn: sym.Linksym()}, s.mem()) default: s.Fatalf("bad call type %v %v", n.Op, n) } @@ -4924,7 +4929,7 @@ func (s *state) rtcall(fn *obj.LSym, returns bool, results []*types.Type, args . off = Rnd(off, int64(Widthreg)) // Issue call - call := s.newValue1A(ssa.OpStaticCall, types.TypeMem, fn, s.mem()) + call := s.newValue1A(ssa.OpStaticCall, types.TypeMem, &ssa.AuxCall{Fn: fn}, s.mem()) s.vars[&memVar] = call if !returns { @@ -6355,6 +6360,9 @@ func AddAux2(a *obj.Addr, v *ssa.Value, offset int64) { } // Add symbol's offset from its base register. 
switch n := v.Aux.(type) { + case *ssa.AuxCall: + a.Name = obj.NAME_EXTERN + a.Sym = n.Fn case *obj.LSym: a.Name = obj.NAME_EXTERN a.Sym = n @@ -6541,10 +6549,10 @@ func (s *SSAGenState) Call(v *ssa.Value) *obj.Prog { } else { p.Pos = v.Pos.WithNotStmt() } - if sym, ok := v.Aux.(*obj.LSym); ok { + if sym, ok := v.Aux.(*ssa.AuxCall); ok && sym.Fn != nil { p.To.Type = obj.TYPE_MEM p.To.Name = obj.NAME_EXTERN - p.To.Sym = sym + p.To.Sym = sym.Fn } else { // TODO(mdempsky): Can these differences be eliminated? switch thearch.LinkArch.Family { @@ -6567,12 +6575,14 @@ func (s *SSAGenState) PrepareCall(v *ssa.Value) { idx := s.livenessMap.Get(v) if !idx.StackMapValid() { // See Liveness.hasStackMap. - if sym, _ := v.Aux.(*obj.LSym); !(sym == typedmemclr || sym == typedmemmove) { + if sym, ok := v.Aux.(*ssa.AuxCall); !ok || !(sym.Fn == typedmemclr || sym.Fn == typedmemmove) { Fatalf("missing stack map index for %v", v.LongString()) } } - if sym, _ := v.Aux.(*obj.LSym); sym == Deferreturn { + call, ok := v.Aux.(*ssa.AuxCall) + + if ok && call.Fn == Deferreturn { // Deferred calls will appear to be returning to // the CALL deferreturn(SB) that we are about to emit. // However, the stack trace code will show the line @@ -6584,11 +6594,11 @@ func (s *SSAGenState) PrepareCall(v *ssa.Value) { thearch.Ginsnopdefer(s.pp) } - if sym, ok := v.Aux.(*obj.LSym); ok { + if ok { // Record call graph information for nowritebarrierrec // analysis. if nowritebarrierrecCheck != nil { - nowritebarrierrecCheck.recordCall(s.pp.curfn, sym, v.Pos) + nowritebarrierrecCheck.recordCall(s.pp.curfn, call.Fn, v.Pos) } } diff --git a/src/cmd/compile/internal/ssa/check.go b/src/cmd/compile/internal/ssa/check.go index 828f645b39..9ce87e0aea 100644 --- a/src/cmd/compile/internal/ssa/check.go +++ b/src/cmd/compile/internal/ssa/check.go @@ -165,6 +165,18 @@ func checkFunc(f *Func) { f.Fatalf("value %v has Aux type %T, want string", v, v.Aux) } canHaveAux = true + case auxCallOff: + canHaveAuxInt = true + fallthrough + case auxCall: + if ac, ok := v.Aux.(*AuxCall); ok { + if v.Op == OpStaticCall && ac.Fn == nil { + f.Fatalf("value %v has *AuxCall with nil Fn", v) + } + } else { + f.Fatalf("value %v has Aux type %T, want *AuxCall", v, v.Aux) + } + canHaveAux = true case auxSym, auxTyp: canHaveAux = true case auxSymOff, auxSymValAndOff, auxTypSize: diff --git a/src/cmd/compile/internal/ssa/func_test.go b/src/cmd/compile/internal/ssa/func_test.go index 5f6f80f72a..568c6436f5 100644 --- a/src/cmd/compile/internal/ssa/func_test.go +++ b/src/cmd/compile/internal/ssa/func_test.go @@ -38,6 +38,7 @@ package ssa import ( "cmd/compile/internal/types" + "cmd/internal/obj" "cmd/internal/src" "fmt" "reflect" @@ -140,6 +141,12 @@ var emptyPass pass = pass{ name: "empty pass", } +// AuxCallLSym returns an AuxCall initialized with an LSym that should pass "check" +// as the Aux of a static call. +func AuxCallLSym(name string) *AuxCall { + return &AuxCall{Fn: &obj.LSym{}} +} + // Fun takes the name of an entry bloc and a series of Bloc calls, and // returns a fun containing the composed Func. entry must be a name // supplied to one of the Bloc functions. 
Each of the bloc names and diff --git a/src/cmd/compile/internal/ssa/fuse_test.go b/src/cmd/compile/internal/ssa/fuse_test.go index 5fe3da93ca..15190997f2 100644 --- a/src/cmd/compile/internal/ssa/fuse_test.go +++ b/src/cmd/compile/internal/ssa/fuse_test.go @@ -142,10 +142,10 @@ func TestFuseSideEffects(t *testing.T) { Valu("b", OpArg, c.config.Types.Bool, 0, nil), If("b", "then", "else")), Bloc("then", - Valu("call1", OpStaticCall, types.TypeMem, 0, nil, "mem"), + Valu("call1", OpStaticCall, types.TypeMem, 0, AuxCallLSym("_"), "mem"), Goto("empty")), Bloc("else", - Valu("call2", OpStaticCall, types.TypeMem, 0, nil, "mem"), + Valu("call2", OpStaticCall, types.TypeMem, 0, AuxCallLSym("_"), "mem"), Goto("empty")), Bloc("empty", Goto("loop")), diff --git a/src/cmd/compile/internal/ssa/gen/386Ops.go b/src/cmd/compile/internal/ssa/gen/386Ops.go index 1061e5579d..64a17cb7a3 100644 --- a/src/cmd/compile/internal/ssa/gen/386Ops.go +++ b/src/cmd/compile/internal/ssa/gen/386Ops.go @@ -463,7 +463,7 @@ func init() { faultOnNilArg0: true, }, - {name: "CALLstatic", argLength: 1, reg: regInfo{clobbers: callerSave}, aux: "SymOff", clobberFlags: true, call: true, symEffect: "None"}, // call static function aux.(*obj.LSym). arg0=mem, auxint=argsize, returns mem + {name: "CALLstatic", argLength: 1, reg: regInfo{clobbers: callerSave}, aux: "CallOff", clobberFlags: true, call: true}, // call static function aux.(*obj.LSym). arg0=mem, auxint=argsize, returns mem {name: "CALLclosure", argLength: 3, reg: regInfo{inputs: []regMask{gpsp, buildReg("DX"), 0}, clobbers: callerSave}, aux: "Int64", clobberFlags: true, call: true}, // call function via closure. arg0=codeptr, arg1=closure, arg2=mem, auxint=argsize, returns mem {name: "CALLinter", argLength: 2, reg: regInfo{inputs: []regMask{gp}, clobbers: callerSave}, aux: "Int64", clobberFlags: true, call: true}, // call fn by pointer. arg0=codeptr, arg1=mem, auxint=argsize, returns mem diff --git a/src/cmd/compile/internal/ssa/gen/AMD64Ops.go b/src/cmd/compile/internal/ssa/gen/AMD64Ops.go index e6d66957dd..d267fe8753 100644 --- a/src/cmd/compile/internal/ssa/gen/AMD64Ops.go +++ b/src/cmd/compile/internal/ssa/gen/AMD64Ops.go @@ -767,7 +767,7 @@ func init() { faultOnNilArg0: true, }, - {name: "CALLstatic", argLength: 1, reg: regInfo{clobbers: callerSave}, aux: "SymOff", clobberFlags: true, call: true, symEffect: "None"}, // call static function aux.(*obj.LSym). arg0=mem, auxint=argsize, returns mem + {name: "CALLstatic", argLength: 1, reg: regInfo{clobbers: callerSave}, aux: "CallOff", clobberFlags: true, call: true}, // call static function aux.(*obj.LSym). arg0=mem, auxint=argsize, returns mem {name: "CALLclosure", argLength: 3, reg: regInfo{inputs: []regMask{gpsp, buildReg("DX"), 0}, clobbers: callerSave}, aux: "Int64", clobberFlags: true, call: true}, // call function via closure. arg0=codeptr, arg1=closure, arg2=mem, auxint=argsize, returns mem {name: "CALLinter", argLength: 2, reg: regInfo{inputs: []regMask{gp}, clobbers: callerSave}, aux: "Int64", clobberFlags: true, call: true}, // call fn by pointer. arg0=codeptr, arg1=mem, auxint=argsize, returns mem diff --git a/src/cmd/compile/internal/ssa/gen/ARM64Ops.go b/src/cmd/compile/internal/ssa/gen/ARM64Ops.go index e9af261a6a..f52d68dc33 100644 --- a/src/cmd/compile/internal/ssa/gen/ARM64Ops.go +++ b/src/cmd/compile/internal/ssa/gen/ARM64Ops.go @@ -471,7 +471,7 @@ func init() { {name: "CSEL0", argLength: 2, reg: gp1flags1, asm: "CSEL", aux: "CCop"}, // auxint(flags) ? 
arg0 : 0 // function calls - {name: "CALLstatic", argLength: 1, reg: regInfo{clobbers: callerSave}, aux: "SymOff", clobberFlags: true, call: true, symEffect: "None"}, // call static function aux.(*obj.LSym). arg0=mem, auxint=argsize, returns mem + {name: "CALLstatic", argLength: 1, reg: regInfo{clobbers: callerSave}, aux: "CallOff", clobberFlags: true, call: true}, // call static function aux.(*obj.LSym). arg0=mem, auxint=argsize, returns mem {name: "CALLclosure", argLength: 3, reg: regInfo{inputs: []regMask{gpsp, buildReg("R26"), 0}, clobbers: callerSave}, aux: "Int64", clobberFlags: true, call: true}, // call function via closure. arg0=codeptr, arg1=closure, arg2=mem, auxint=argsize, returns mem {name: "CALLinter", argLength: 2, reg: regInfo{inputs: []regMask{gp}, clobbers: callerSave}, aux: "Int64", clobberFlags: true, call: true}, // call fn by pointer. arg0=codeptr, arg1=mem, auxint=argsize, returns mem diff --git a/src/cmd/compile/internal/ssa/gen/ARMOps.go b/src/cmd/compile/internal/ssa/gen/ARMOps.go index 068fecf74c..1e6b4546da 100644 --- a/src/cmd/compile/internal/ssa/gen/ARMOps.go +++ b/src/cmd/compile/internal/ssa/gen/ARMOps.go @@ -428,7 +428,7 @@ func init() { {name: "SRAcond", argLength: 3, reg: gp2flags1, asm: "SRA"}, // arg0 >> 31 if flags indicates HS, arg0 >> arg1 otherwise, signed shift, arg2=flags // function calls - {name: "CALLstatic", argLength: 1, reg: regInfo{clobbers: callerSave}, aux: "SymOff", clobberFlags: true, call: true, symEffect: "None"}, // call static function aux.(*obj.LSym). arg0=mem, auxint=argsize, returns mem + {name: "CALLstatic", argLength: 1, reg: regInfo{clobbers: callerSave}, aux: "CallOff", clobberFlags: true, call: true}, // call static function aux.(*obj.LSym). arg0=mem, auxint=argsize, returns mem {name: "CALLclosure", argLength: 3, reg: regInfo{inputs: []regMask{gpsp, buildReg("R7"), 0}, clobbers: callerSave}, aux: "Int64", clobberFlags: true, call: true}, // call function via closure. arg0=codeptr, arg1=closure, arg2=mem, auxint=argsize, returns mem {name: "CALLinter", argLength: 2, reg: regInfo{inputs: []regMask{gp}, clobbers: callerSave}, aux: "Int64", clobberFlags: true, call: true}, // call fn by pointer. arg0=codeptr, arg1=mem, auxint=argsize, returns mem diff --git a/src/cmd/compile/internal/ssa/gen/MIPS64Ops.go b/src/cmd/compile/internal/ssa/gen/MIPS64Ops.go index 5f00c080af..dc2e9d3ec9 100644 --- a/src/cmd/compile/internal/ssa/gen/MIPS64Ops.go +++ b/src/cmd/compile/internal/ssa/gen/MIPS64Ops.go @@ -273,7 +273,7 @@ func init() { {name: "MOVDF", argLength: 1, reg: fp11, asm: "MOVDF"}, // float64 -> float32 // function calls - {name: "CALLstatic", argLength: 1, reg: regInfo{clobbers: callerSave}, aux: "SymOff", clobberFlags: true, call: true, symEffect: "None"}, // call static function aux.(*obj.LSym). arg0=mem, auxint=argsize, returns mem + {name: "CALLstatic", argLength: 1, reg: regInfo{clobbers: callerSave}, aux: "CallOff", clobberFlags: true, call: true}, // call static function aux.(*obj.LSym). arg0=mem, auxint=argsize, returns mem {name: "CALLclosure", argLength: 3, reg: regInfo{inputs: []regMask{gpsp, buildReg("R22"), 0}, clobbers: callerSave}, aux: "Int64", clobberFlags: true, call: true}, // call function via closure. arg0=codeptr, arg1=closure, arg2=mem, auxint=argsize, returns mem {name: "CALLinter", argLength: 2, reg: regInfo{inputs: []regMask{gp}, clobbers: callerSave}, aux: "Int64", clobberFlags: true, call: true}, // call fn by pointer. 
arg0=codeptr, arg1=mem, auxint=argsize, returns mem diff --git a/src/cmd/compile/internal/ssa/gen/MIPSOps.go b/src/cmd/compile/internal/ssa/gen/MIPSOps.go index a5f6c8df54..c66adcf93a 100644 --- a/src/cmd/compile/internal/ssa/gen/MIPSOps.go +++ b/src/cmd/compile/internal/ssa/gen/MIPSOps.go @@ -255,7 +255,7 @@ func init() { {name: "MOVDF", argLength: 1, reg: fp11, asm: "MOVDF"}, // float64 -> float32 // function calls - {name: "CALLstatic", argLength: 1, reg: regInfo{clobbers: callerSave}, aux: "SymOff", clobberFlags: true, call: true, symEffect: "None"}, // call static function aux.(*obj.LSym). arg0=mem, auxint=argsize, returns mem + {name: "CALLstatic", argLength: 1, reg: regInfo{clobbers: callerSave}, aux: "CallOff", clobberFlags: true, call: true}, // call static function aux.(*obj.LSym). arg0=mem, auxint=argsize, returns mem {name: "CALLclosure", argLength: 3, reg: regInfo{inputs: []regMask{gpsp, buildReg("R22"), 0}, clobbers: callerSave}, aux: "Int64", clobberFlags: true, call: true}, // call function via closure. arg0=codeptr, arg1=closure, arg2=mem, auxint=argsize, returns mem {name: "CALLinter", argLength: 2, reg: regInfo{inputs: []regMask{gp}, clobbers: callerSave}, aux: "Int64", clobberFlags: true, call: true}, // call fn by pointer. arg0=codeptr, arg1=mem, auxint=argsize, returns mem diff --git a/src/cmd/compile/internal/ssa/gen/PPC64Ops.go b/src/cmd/compile/internal/ssa/gen/PPC64Ops.go index 44f6a74c63..0c04e561ad 100644 --- a/src/cmd/compile/internal/ssa/gen/PPC64Ops.go +++ b/src/cmd/compile/internal/ssa/gen/PPC64Ops.go @@ -414,7 +414,7 @@ func init() { {name: "LoweredRound32F", argLength: 1, reg: fp11, resultInArg0: true, zeroWidth: true}, {name: "LoweredRound64F", argLength: 1, reg: fp11, resultInArg0: true, zeroWidth: true}, - {name: "CALLstatic", argLength: 1, reg: regInfo{clobbers: callerSave}, aux: "SymOff", clobberFlags: true, call: true, symEffect: "None"}, // call static function aux.(*obj.LSym). arg0=mem, auxint=argsize, returns mem + {name: "CALLstatic", argLength: 1, reg: regInfo{clobbers: callerSave}, aux: "CallOff", clobberFlags: true, call: true}, // call static function aux.(*obj.LSym). arg0=mem, auxint=argsize, returns mem {name: "CALLclosure", argLength: 3, reg: regInfo{inputs: []regMask{callptr, ctxt, 0}, clobbers: callerSave}, aux: "Int64", clobberFlags: true, call: true}, // call function via closure. arg0=codeptr, arg1=closure, arg2=mem, auxint=argsize, returns mem {name: "CALLinter", argLength: 2, reg: regInfo{inputs: []regMask{callptr}, clobbers: callerSave}, aux: "Int64", clobberFlags: true, call: true}, // call fn by pointer. arg0=codeptr, arg1=mem, auxint=argsize, returns mem diff --git a/src/cmd/compile/internal/ssa/gen/RISCV64Ops.go b/src/cmd/compile/internal/ssa/gen/RISCV64Ops.go index 8ab4abe04a..17970918e2 100644 --- a/src/cmd/compile/internal/ssa/gen/RISCV64Ops.go +++ b/src/cmd/compile/internal/ssa/gen/RISCV64Ops.go @@ -224,9 +224,9 @@ func init() { {name: "MOVconvert", argLength: 2, reg: gp11, asm: "MOV"}, // arg0, but converted to int/ptr as appropriate; arg1=mem // Calls - {name: "CALLstatic", argLength: 1, reg: call, aux: "SymOff", call: true, symEffect: "None"}, // call static function aux.(*gc.Sym). arg0=mem, auxint=argsize, returns mem - {name: "CALLclosure", argLength: 3, reg: callClosure, aux: "Int64", call: true}, // call function via closure. arg0=codeptr, arg1=closure, arg2=mem, auxint=argsize, returns mem - {name: "CALLinter", argLength: 2, reg: callInter, aux: "Int64", call: true}, // call fn by pointer. 
arg0=codeptr, arg1=mem, auxint=argsize, returns mem + {name: "CALLstatic", argLength: 1, reg: call, aux: "CallOff", call: true}, // call static function aux.(*gc.Sym). arg0=mem, auxint=argsize, returns mem + {name: "CALLclosure", argLength: 3, reg: callClosure, aux: "Int64", call: true}, // call function via closure. arg0=codeptr, arg1=closure, arg2=mem, auxint=argsize, returns mem + {name: "CALLinter", argLength: 2, reg: callInter, aux: "Int64", call: true}, // call fn by pointer. arg0=codeptr, arg1=mem, auxint=argsize, returns mem // Generic moves and zeros diff --git a/src/cmd/compile/internal/ssa/gen/S390XOps.go b/src/cmd/compile/internal/ssa/gen/S390XOps.go index 710beaddbb..eede8a654b 100644 --- a/src/cmd/compile/internal/ssa/gen/S390XOps.go +++ b/src/cmd/compile/internal/ssa/gen/S390XOps.go @@ -475,7 +475,7 @@ func init() { {name: "CLEAR", argLength: 2, reg: regInfo{inputs: []regMask{ptr, 0}}, asm: "CLEAR", aux: "SymValAndOff", typ: "Mem", clobberFlags: true, faultOnNilArg0: true, symEffect: "Write"}, - {name: "CALLstatic", argLength: 1, reg: regInfo{clobbers: callerSave}, aux: "SymOff", clobberFlags: true, call: true, symEffect: "None"}, // call static function aux.(*obj.LSym). arg0=mem, auxint=argsize, returns mem + {name: "CALLstatic", argLength: 1, reg: regInfo{clobbers: callerSave}, aux: "CallOff", clobberFlags: true, call: true}, // call static function aux.(*obj.LSym). arg0=mem, auxint=argsize, returns mem {name: "CALLclosure", argLength: 3, reg: regInfo{inputs: []regMask{ptrsp, buildReg("R12"), 0}, clobbers: callerSave}, aux: "Int64", clobberFlags: true, call: true}, // call function via closure. arg0=codeptr, arg1=closure, arg2=mem, auxint=argsize, returns mem {name: "CALLinter", argLength: 2, reg: regInfo{inputs: []regMask{ptr}, clobbers: callerSave}, aux: "Int64", clobberFlags: true, call: true}, // call fn by pointer. arg0=codeptr, arg1=mem, auxint=argsize, returns mem diff --git a/src/cmd/compile/internal/ssa/gen/WasmOps.go b/src/cmd/compile/internal/ssa/gen/WasmOps.go index e43eae17e9..3286a68fb0 100644 --- a/src/cmd/compile/internal/ssa/gen/WasmOps.go +++ b/src/cmd/compile/internal/ssa/gen/WasmOps.go @@ -122,7 +122,7 @@ func init() { ) var WasmOps = []opData{ - {name: "LoweredStaticCall", argLength: 1, reg: regInfo{clobbers: callerSave}, aux: "SymOff", call: true, symEffect: "None"}, // call static function aux.(*obj.LSym). arg0=mem, auxint=argsize, returns mem + {name: "LoweredStaticCall", argLength: 1, reg: regInfo{clobbers: callerSave}, aux: "CallOff", call: true}, // call static function aux.(*obj.LSym). arg0=mem, auxint=argsize, returns mem {name: "LoweredClosureCall", argLength: 3, reg: regInfo{inputs: []regMask{gp, gp, 0}, clobbers: callerSave}, aux: "Int64", call: true}, // call function via closure. arg0=codeptr, arg1=closure, arg2=mem, auxint=argsize, returns mem {name: "LoweredInterCall", argLength: 2, reg: regInfo{inputs: []regMask{gp}, clobbers: callerSave}, aux: "Int64", call: true}, // call fn by pointer. 
arg0=codeptr, arg1=mem, auxint=argsize, returns mem diff --git a/src/cmd/compile/internal/ssa/gen/generic.rules b/src/cmd/compile/internal/ssa/gen/generic.rules index f7e6bbebac..df70838aa9 100644 --- a/src/cmd/compile/internal/ssa/gen/generic.rules +++ b/src/cmd/compile/internal/ssa/gen/generic.rules @@ -1933,30 +1933,30 @@ // recognize runtime.newobject and don't Zero/Nilcheck it (Zero (Load (OffPtr [c] (SP)) mem) mem) && mem.Op == OpStaticCall - && isSameSym(mem.Aux, "runtime.newobject") + && isSameCall(mem.Aux, "runtime.newobject") && c == config.ctxt.FixedFrameSize() + config.RegSize // offset of return value => mem (Store (Load (OffPtr [c] (SP)) mem) x mem) && isConstZero(x) && mem.Op == OpStaticCall - && isSameSym(mem.Aux, "runtime.newobject") + && isSameCall(mem.Aux, "runtime.newobject") && c == config.ctxt.FixedFrameSize() + config.RegSize // offset of return value => mem (Store (OffPtr (Load (OffPtr [c] (SP)) mem)) x mem) && isConstZero(x) && mem.Op == OpStaticCall - && isSameSym(mem.Aux, "runtime.newobject") + && isSameCall(mem.Aux, "runtime.newobject") && c == config.ctxt.FixedFrameSize() + config.RegSize // offset of return value => mem // nil checks just need to rewrite to something useless. // they will be deadcode eliminated soon afterwards. (NilCheck (Load (OffPtr [c] (SP)) (StaticCall {sym} _)) _) - && symNamed(sym, "runtime.newobject") + && isSameCall(sym, "runtime.newobject") && c == config.ctxt.FixedFrameSize() + config.RegSize // offset of return value && warnRule(fe.Debug_checknil(), v, "removed nil check") => (Invalid) (NilCheck (OffPtr (Load (OffPtr [c] (SP)) (StaticCall {sym} _))) _) - && symNamed(sym, "runtime.newobject") + && isSameCall(sym, "runtime.newobject") && c == config.ctxt.FixedFrameSize() + config.RegSize // offset of return value && warnRule(fe.Debug_checknil(), v, "removed nil check") => (Invalid) @@ -2010,7 +2010,7 @@ // See the comment in op Move in genericOps.go for discussion of the type. (StaticCall {sym} s1:(Store _ (Const(64|32) [sz]) s2:(Store _ src s3:(Store {t} _ dst mem)))) && sz >= 0 - && symNamed(sym, "runtime.memmove") + && isSameCall(sym, "runtime.memmove") && t.IsPtr() // avoids TUINTPTR, see issue 30061 && s1.Uses == 1 && s2.Uses == 1 && s3.Uses == 1 && isInlinableMemmove(dst, src, int64(sz), config) diff --git a/src/cmd/compile/internal/ssa/gen/genericOps.go b/src/cmd/compile/internal/ssa/gen/genericOps.go index 5df0a164bf..acfe222089 100644 --- a/src/cmd/compile/internal/ssa/gen/genericOps.go +++ b/src/cmd/compile/internal/ssa/gen/genericOps.go @@ -387,9 +387,9 @@ var genericOps = []opData{ // as a phantom first argument. // TODO(josharian): ClosureCall and InterCall should have Int32 aux // to match StaticCall's 32 bit arg size limit. - {name: "ClosureCall", argLength: 3, aux: "Int64", call: true}, // arg0=code pointer, arg1=context ptr, arg2=memory. auxint=arg size. Returns memory. - {name: "StaticCall", argLength: 1, aux: "SymOff", call: true, symEffect: "None"}, // call function aux.(*obj.LSym), arg0=memory. auxint=arg size. Returns memory. - {name: "InterCall", argLength: 2, aux: "Int64", call: true}, // interface call. arg0=code pointer, arg1=memory, auxint=arg size. Returns memory. + {name: "ClosureCall", argLength: 3, aux: "Int64", call: true}, // arg0=code pointer, arg1=context ptr, arg2=memory. auxint=arg size. Returns memory. + {name: "StaticCall", argLength: 1, aux: "CallOff", call: true}, // call function aux.(*obj.LSym), arg0=memory. auxint=arg size. Returns memory. 
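For illustration: with StaticCall's aux changed from a bare symbol to an AuxCall, a rule condition such as isSameCall(mem.Aux, "runtime.newobject") comes down to a type assertion on the aux value plus a name comparison on its Fn. A minimal sketch, written as if inside package ssa; callTargetIs is a hypothetical helper used only to show the shape of the check, not something this patch adds:

    // callTargetIs reports whether v is a static call to the named function.
    // Sketch only; the generated rewrite code inlines this logic via isSameCall.
    func callTargetIs(v *Value, name string) bool {
        if v.Op != OpStaticCall {
            return false
        }
        ac, ok := v.Aux.(*AuxCall) // aux is now *AuxCall rather than *obj.LSym
        return ok && ac.Fn != nil && ac.Fn.String() == name
    }
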
+ {name: "InterCall", argLength: 2, aux: "Int64", call: true}, // interface call. arg0=code pointer, arg1=memory, auxint=arg size. Returns memory. // Conversions: signed extensions, zero (unsigned) extensions, truncations {name: "SignExt8to16", argLength: 1, typ: "Int16"}, diff --git a/src/cmd/compile/internal/ssa/gen/rulegen.go b/src/cmd/compile/internal/ssa/gen/rulegen.go index 9e2e112cd7..be51a7c5f8 100644 --- a/src/cmd/compile/internal/ssa/gen/rulegen.go +++ b/src/cmd/compile/internal/ssa/gen/rulegen.go @@ -1424,7 +1424,7 @@ func parseValue(val string, arch arch, loc string) (op opData, oparch, typ, auxi func opHasAuxInt(op opData) bool { switch op.aux { case "Bool", "Int8", "Int16", "Int32", "Int64", "Int128", "Float32", "Float64", - "SymOff", "SymValAndOff", "TypSize", "ARM64BitField", "FlagConstant", "CCop": + "SymOff", "CallOff", "SymValAndOff", "TypSize", "ARM64BitField", "FlagConstant", "CCop": return true } return false @@ -1432,7 +1432,7 @@ func opHasAuxInt(op opData) bool { func opHasAux(op opData) bool { switch op.aux { - case "String", "Sym", "SymOff", "SymValAndOff", "Typ", "TypSize", + case "String", "Sym", "SymOff", "Call", "CallOff", "SymValAndOff", "Typ", "TypSize", "S390XCCMask", "S390XRotateParams": return true } @@ -1775,6 +1775,10 @@ func (op opData) auxType() string { return "Sym" case "SymOff": return "Sym" + case "Call": + return "Call" + case "CallOff": + return "Call" case "SymValAndOff": return "Sym" case "Typ": @@ -1809,6 +1813,8 @@ func (op opData) auxIntType() string { return "float32" case "Float64": return "float64" + case "CallOff": + return "int32" case "SymOff": return "int32" case "SymValAndOff": diff --git a/src/cmd/compile/internal/ssa/loopreschedchecks.go b/src/cmd/compile/internal/ssa/loopreschedchecks.go index 1932f9d23a..4a720fdede 100644 --- a/src/cmd/compile/internal/ssa/loopreschedchecks.go +++ b/src/cmd/compile/internal/ssa/loopreschedchecks.go @@ -246,7 +246,7 @@ func insertLoopReschedChecks(f *Func) { // mem1 := call resched (mem0) // goto header resched := f.fe.Syslook("goschedguarded") - mem1 := sched.NewValue1A(bb.Pos, OpStaticCall, types.TypeMem, resched, mem0) + mem1 := sched.NewValue1A(bb.Pos, OpStaticCall, types.TypeMem, &AuxCall{resched}, mem0) sched.AddEdgeTo(h) headerMemPhi.AddArg(mem1) diff --git a/src/cmd/compile/internal/ssa/op.go b/src/cmd/compile/internal/ssa/op.go index 063998c6a1..3aa506e3ab 100644 --- a/src/cmd/compile/internal/ssa/op.go +++ b/src/cmd/compile/internal/ssa/op.go @@ -67,6 +67,17 @@ type regInfo struct { type auxType int8 +type AuxCall struct { + Fn *obj.LSym +} + +func (a *AuxCall) String() string { + if a.Fn == nil { + return "AuxCall(nil)" + } + return fmt.Sprintf("AuxCall(%v)", a.Fn) +} + const ( auxNone auxType = iota auxBool // auxInt is 0/1 for false/true @@ -85,6 +96,8 @@ const ( auxTyp // aux is a type auxTypSize // aux is a type, auxInt is a size, must have Aux.(Type).Size() == AuxInt auxCCop // aux is a ssa.Op that represents a flags-to-bool conversion (e.g. 
LessThan) + auxCall // aux is a *ssa.AuxCall + auxCallOff // aux is a *ssa.AuxCall, AuxInt is int64 param (in+out) size // architecture specific aux types auxARM64BitField // aux is an arm64 bitfield lsb and width packed into auxInt diff --git a/src/cmd/compile/internal/ssa/opGen.go b/src/cmd/compile/internal/ssa/opGen.go index 45401898c8..797d82f2d1 100644 --- a/src/cmd/compile/internal/ssa/opGen.go +++ b/src/cmd/compile/internal/ssa/opGen.go @@ -5816,11 +5816,10 @@ var opcodeTable = [...]opInfo{ }, { name: "CALLstatic", - auxType: auxSymOff, + auxType: auxCallOff, argLen: 1, clobberFlags: true, call: true, - symEffect: SymNone, reg: regInfo{ clobbers: 65519, // AX CX DX BX BP SI DI X0 X1 X2 X3 X4 X5 X6 X7 }, @@ -13152,11 +13151,10 @@ var opcodeTable = [...]opInfo{ }, { name: "CALLstatic", - auxType: auxSymOff, + auxType: auxCallOff, argLen: 1, clobberFlags: true, call: true, - symEffect: SymNone, reg: regInfo{ clobbers: 4294967279, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 }, @@ -16922,11 +16920,10 @@ var opcodeTable = [...]opInfo{ }, { name: "CALLstatic", - auxType: auxSymOff, + auxType: auxCallOff, argLen: 1, clobberFlags: true, call: true, - symEffect: SymNone, reg: regInfo{ clobbers: 4294924287, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 }, @@ -20556,11 +20553,10 @@ var opcodeTable = [...]opInfo{ }, { name: "CALLstatic", - auxType: auxSymOff, + auxType: auxCallOff, argLen: 1, clobberFlags: true, call: true, - symEffect: SymNone, reg: regInfo{ clobbers: 9223372035512336383, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, @@ -22257,11 +22253,10 @@ var opcodeTable = [...]opInfo{ }, { name: "CALLstatic", - auxType: auxSymOff, + auxType: auxCallOff, argLen: 1, clobberFlags: true, call: true, - symEffect: SymNone, reg: regInfo{ clobbers: 140737421246462, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 HI LO }, @@ -23804,11 +23799,10 @@ var opcodeTable = [...]opInfo{ }, { name: "CALLstatic", - auxType: auxSymOff, + auxType: auxCallOff, argLen: 1, clobberFlags: true, call: true, - symEffect: SymNone, reg: regInfo{ clobbers: 4611686018393833470, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 HI LO }, @@ -26504,11 +26498,10 @@ var opcodeTable = [...]opInfo{ }, { name: "CALLstatic", - auxType: auxSymOff, + auxType: auxCallOff, argLen: 1, clobberFlags: true, call: true, - symEffect: SymNone, reg: regInfo{ clobbers: 576460745860964344, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 g F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 }, @@ -27787,11 +27780,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "CALLstatic", - auxType: auxSymOff, - argLen: 1, - call: true, - symEffect: SymNone, + name: "CALLstatic", + auxType: auxCallOff, + argLen: 1, + call: true, reg: regInfo{ clobbers: 9223372035781033980, // X3 g X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 F0 F1 F2 F3 F4 F5 F6 F7 F8 
F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, @@ -31386,11 +31378,10 @@ var opcodeTable = [...]opInfo{ }, { name: "CALLstatic", - auxType: auxSymOff, + auxType: auxCallOff, argLen: 1, clobberFlags: true, call: true, - symEffect: SymNone, reg: regInfo{ clobbers: 4294933503, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 g R14 F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 }, @@ -32031,11 +32022,10 @@ var opcodeTable = [...]opInfo{ }, { - name: "LoweredStaticCall", - auxType: auxSymOff, - argLen: 1, - call: true, - symEffect: SymNone, + name: "LoweredStaticCall", + auxType: auxCallOff, + argLen: 1, + call: true, reg: regInfo{ clobbers: 844424930131967, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 g }, @@ -34779,12 +34769,11 @@ var opcodeTable = [...]opInfo{ generic: true, }, { - name: "StaticCall", - auxType: auxSymOff, - argLen: 1, - call: true, - symEffect: SymNone, - generic: true, + name: "StaticCall", + auxType: auxCallOff, + argLen: 1, + call: true, + generic: true, }, { name: "InterCall", diff --git a/src/cmd/compile/internal/ssa/regalloc_test.go b/src/cmd/compile/internal/ssa/regalloc_test.go index bb8be5e7ac..d990cac47b 100644 --- a/src/cmd/compile/internal/ssa/regalloc_test.go +++ b/src/cmd/compile/internal/ssa/regalloc_test.go @@ -68,7 +68,7 @@ func TestNoGetgLoadReg(t *testing.T) { Exit("v16"), ), Bloc("b2", - Valu("v12", OpARM64CALLstatic, types.TypeMem, 0, nil, "v1"), + Valu("v12", OpARM64CALLstatic, types.TypeMem, 0, AuxCallLSym("_"), "v1"), Goto("b3"), ), ) @@ -99,7 +99,7 @@ func TestSpillWithLoop(t *testing.T) { ), Bloc("loop", Valu("memphi", OpPhi, types.TypeMem, 0, nil, "mem", "call"), - Valu("call", OpAMD64CALLstatic, types.TypeMem, 0, nil, "memphi"), + Valu("call", OpAMD64CALLstatic, types.TypeMem, 0, AuxCallLSym("_"), "memphi"), Valu("test", OpAMD64CMPBconst, types.TypeFlags, 0, nil, "cond"), Eq("test", "next", "exit"), ), @@ -140,12 +140,12 @@ func TestSpillMove1(t *testing.T) { Bloc("exit1", // store before call, y is available in a register Valu("mem2", OpAMD64MOVQstore, types.TypeMem, 0, nil, "p", "y", "mem"), - Valu("mem3", OpAMD64CALLstatic, types.TypeMem, 0, nil, "mem2"), + Valu("mem3", OpAMD64CALLstatic, types.TypeMem, 0, AuxCallLSym("_"), "mem2"), Exit("mem3"), ), Bloc("exit2", // store after call, y must be loaded from a spill location - Valu("mem4", OpAMD64CALLstatic, types.TypeMem, 0, nil, "mem"), + Valu("mem4", OpAMD64CALLstatic, types.TypeMem, 0, AuxCallLSym("_"), "mem"), Valu("mem5", OpAMD64MOVQstore, types.TypeMem, 0, nil, "p", "y", "mem4"), Exit("mem5"), ), @@ -188,13 +188,13 @@ func TestSpillMove2(t *testing.T) { ), Bloc("exit1", // store after call, y must be loaded from a spill location - Valu("mem2", OpAMD64CALLstatic, types.TypeMem, 0, nil, "mem"), + Valu("mem2", OpAMD64CALLstatic, types.TypeMem, 0, AuxCallLSym("_"), "mem"), Valu("mem3", OpAMD64MOVQstore, types.TypeMem, 0, nil, "p", "y", "mem2"), Exit("mem3"), ), Bloc("exit2", // store after call, y must be loaded from a spill location - Valu("mem4", OpAMD64CALLstatic, types.TypeMem, 0, nil, "mem"), + Valu("mem4", OpAMD64CALLstatic, types.TypeMem, 0, AuxCallLSym("_"), "mem"), Valu("mem5", OpAMD64MOVQstore, types.TypeMem, 0, nil, "p", "y", "mem4"), Exit("mem5"), ), diff --git a/src/cmd/compile/internal/ssa/rewrite.go b/src/cmd/compile/internal/ssa/rewrite.go index 09f94ef53e..8195d407e0 100644 --- 
a/src/cmd/compile/internal/ssa/rewrite.go +++ b/src/cmd/compile/internal/ssa/rewrite.go @@ -393,15 +393,9 @@ func canMergeLoad(target, load *Value) bool { return true } -// symNamed reports whether sym's name is name. -func symNamed(sym Sym, name string) bool { - return sym.String() == name -} - -// isSameSym reports whether sym is the same as the given named symbol -func isSameSym(sym interface{}, name string) bool { - s, ok := sym.(fmt.Stringer) - return ok && s.String() == name +// isSameCall reports whether sym is the same as the given named symbol +func isSameCall(sym interface{}, name string) bool { + return sym.(*AuxCall).Fn.String() == name } // nlz returns the number of leading zeros. @@ -713,6 +707,9 @@ func auxToSym(i interface{}) Sym { func auxToType(i interface{}) *types.Type { return i.(*types.Type) } +func auxToCall(i interface{}) *AuxCall { + return i.(*AuxCall) +} func auxToS390xCCMask(i interface{}) s390x.CCMask { return i.(s390x.CCMask) } @@ -726,6 +723,9 @@ func stringToAux(s string) interface{} { func symToAux(s Sym) interface{} { return s } +func callToAux(s *AuxCall) interface{} { + return s +} func typeToAux(t *types.Type) interface{} { return t } @@ -743,7 +743,7 @@ func uaddOvf(a, b int64) bool { // de-virtualize an InterCall // 'sym' is the symbol for the itab -func devirt(v *Value, sym Sym, offset int64) *obj.LSym { +func devirt(v *Value, sym Sym, offset int64) *AuxCall { f := v.Block.Func n, ok := sym.(*obj.LSym) if !ok { @@ -757,7 +757,10 @@ func devirt(v *Value, sym Sym, offset int64) *obj.LSym { f.Warnl(v.Pos, "couldn't de-virtualize call") } } - return lsym + if lsym == nil { + return nil + } + return &AuxCall{Fn: lsym} } // isSamePtr reports whether p1 and p2 point to the same address. @@ -1377,12 +1380,12 @@ func registerizable(b *Block, typ *types.Type) bool { } // needRaceCleanup reports whether this call to racefuncenter/exit isn't needed. -func needRaceCleanup(sym Sym, v *Value) bool { +func needRaceCleanup(sym *AuxCall, v *Value) bool { f := v.Block.Func if !f.Config.Race { return false } - if !symNamed(sym, "runtime.racefuncenter") && !symNamed(sym, "runtime.racefuncexit") { + if !isSameCall(sym, "runtime.racefuncenter") && !isSameCall(sym, "runtime.racefuncexit") { return false } for _, b := range f.Blocks { @@ -1391,7 +1394,7 @@ func needRaceCleanup(sym Sym, v *Value) bool { case OpStaticCall: // Check for racefuncenter will encounter racefuncexit and vice versa. // Allow calls to panic* - s := v.Aux.(fmt.Stringer).String() + s := v.Aux.(*AuxCall).Fn.String() switch s { case "runtime.racefuncenter", "runtime.racefuncexit", "runtime.panicdivide", "runtime.panicwrap", @@ -1409,7 +1412,7 @@ func needRaceCleanup(sym Sym, v *Value) bool { } } } - if symNamed(sym, "runtime.racefuncenter") { + if isSameCall(sym, "runtime.racefuncenter") { // If we're removing racefuncenter, remove its argument as well. 
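For illustration: auxToCall and callToAux let generated rewrite code move an *AuxCall in and out of the untyped Aux field, and devirt now returns an *AuxCall that can be stored directly on the rewritten call. A minimal sketch of that devirtualization step, assuming the helpers above; it is not the literal generated code, which appears in rewritegeneric.go below:

    // Turn an interface call with a known itab entry into a static call.
    // Sketch only; helper names match the rewrite support functions above.
    if ac := devirt(v, itab, off); ac != nil {
        v.reset(OpStaticCall)
        v.Aux = callToAux(ac) // read back later with auxToCall(v.Aux)
    }
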
if v.Args[0].Op != OpStore { return false diff --git a/src/cmd/compile/internal/ssa/rewritegeneric.go b/src/cmd/compile/internal/ssa/rewritegeneric.go index 180e48b34c..4b388a68cd 100644 --- a/src/cmd/compile/internal/ssa/rewritegeneric.go +++ b/src/cmd/compile/internal/ssa/rewritegeneric.go @@ -8515,7 +8515,7 @@ func rewriteValuegeneric_OpInterCall(v *Value) bool { } v.reset(OpStaticCall) v.AuxInt = int32ToAuxInt(int32(argsize)) - v.Aux = symToAux(devirt(v, itab, off)) + v.Aux = callToAux(devirt(v, itab, off)) v.AddArg(mem) return true } @@ -16022,7 +16022,7 @@ func rewriteValuegeneric_OpNilCheck(v *Value) bool { return true } // match: (NilCheck (Load (OffPtr [c] (SP)) (StaticCall {sym} _)) _) - // cond: symNamed(sym, "runtime.newobject") && c == config.ctxt.FixedFrameSize() + config.RegSize && warnRule(fe.Debug_checknil(), v, "removed nil check") + // cond: isSameCall(sym, "runtime.newobject") && c == config.ctxt.FixedFrameSize() + config.RegSize && warnRule(fe.Debug_checknil(), v, "removed nil check") // result: (Invalid) for { if v_0.Op != OpLoad { @@ -16042,15 +16042,15 @@ func rewriteValuegeneric_OpNilCheck(v *Value) bool { if v_0_1.Op != OpStaticCall { break } - sym := auxToSym(v_0_1.Aux) - if !(symNamed(sym, "runtime.newobject") && c == config.ctxt.FixedFrameSize()+config.RegSize && warnRule(fe.Debug_checknil(), v, "removed nil check")) { + sym := auxToCall(v_0_1.Aux) + if !(isSameCall(sym, "runtime.newobject") && c == config.ctxt.FixedFrameSize()+config.RegSize && warnRule(fe.Debug_checknil(), v, "removed nil check")) { break } v.reset(OpInvalid) return true } // match: (NilCheck (OffPtr (Load (OffPtr [c] (SP)) (StaticCall {sym} _))) _) - // cond: symNamed(sym, "runtime.newobject") && c == config.ctxt.FixedFrameSize() + config.RegSize && warnRule(fe.Debug_checknil(), v, "removed nil check") + // cond: isSameCall(sym, "runtime.newobject") && c == config.ctxt.FixedFrameSize() + config.RegSize && warnRule(fe.Debug_checknil(), v, "removed nil check") // result: (Invalid) for { if v_0.Op != OpOffPtr { @@ -16074,8 +16074,8 @@ func rewriteValuegeneric_OpNilCheck(v *Value) bool { if v_0_0_1.Op != OpStaticCall { break } - sym := auxToSym(v_0_0_1.Aux) - if !(symNamed(sym, "runtime.newobject") && c == config.ctxt.FixedFrameSize()+config.RegSize && warnRule(fe.Debug_checknil(), v, "removed nil check")) { + sym := auxToCall(v_0_0_1.Aux) + if !(isSameCall(sym, "runtime.newobject") && c == config.ctxt.FixedFrameSize()+config.RegSize && warnRule(fe.Debug_checknil(), v, "removed nil check")) { break } v.reset(OpInvalid) @@ -21067,10 +21067,10 @@ func rewriteValuegeneric_OpStaticCall(v *Value) bool { b := v.Block config := b.Func.Config // match: (StaticCall {sym} s1:(Store _ (Const64 [sz]) s2:(Store _ src s3:(Store {t} _ dst mem)))) - // cond: sz >= 0 && symNamed(sym, "runtime.memmove") && t.IsPtr() && s1.Uses == 1 && s2.Uses == 1 && s3.Uses == 1 && isInlinableMemmove(dst, src, int64(sz), config) && clobber(s1, s2, s3) + // cond: sz >= 0 && isSameCall(sym, "runtime.memmove") && t.IsPtr() && s1.Uses == 1 && s2.Uses == 1 && s3.Uses == 1 && isInlinableMemmove(dst, src, int64(sz), config) && clobber(s1, s2, s3) // result: (Move {t.Elem()} [int64(sz)] dst src mem) for { - sym := auxToSym(v.Aux) + sym := auxToCall(v.Aux) s1 := v_0 if s1.Op != OpStore { break @@ -21094,7 +21094,7 @@ func rewriteValuegeneric_OpStaticCall(v *Value) bool { t := auxToType(s3.Aux) mem := s3.Args[2] dst := s3.Args[1] - if !(sz >= 0 && symNamed(sym, "runtime.memmove") && t.IsPtr() && s1.Uses == 1 && s2.Uses == 1 && s3.Uses == 
1 && isInlinableMemmove(dst, src, int64(sz), config) && clobber(s1, s2, s3)) { + if !(sz >= 0 && isSameCall(sym, "runtime.memmove") && t.IsPtr() && s1.Uses == 1 && s2.Uses == 1 && s3.Uses == 1 && isInlinableMemmove(dst, src, int64(sz), config) && clobber(s1, s2, s3)) { break } v.reset(OpMove) @@ -21104,10 +21104,10 @@ func rewriteValuegeneric_OpStaticCall(v *Value) bool { return true } // match: (StaticCall {sym} s1:(Store _ (Const32 [sz]) s2:(Store _ src s3:(Store {t} _ dst mem)))) - // cond: sz >= 0 && symNamed(sym, "runtime.memmove") && t.IsPtr() && s1.Uses == 1 && s2.Uses == 1 && s3.Uses == 1 && isInlinableMemmove(dst, src, int64(sz), config) && clobber(s1, s2, s3) + // cond: sz >= 0 && isSameCall(sym, "runtime.memmove") && t.IsPtr() && s1.Uses == 1 && s2.Uses == 1 && s3.Uses == 1 && isInlinableMemmove(dst, src, int64(sz), config) && clobber(s1, s2, s3) // result: (Move {t.Elem()} [int64(sz)] dst src mem) for { - sym := auxToSym(v.Aux) + sym := auxToCall(v.Aux) s1 := v_0 if s1.Op != OpStore { break @@ -21131,7 +21131,7 @@ func rewriteValuegeneric_OpStaticCall(v *Value) bool { t := auxToType(s3.Aux) mem := s3.Args[2] dst := s3.Args[1] - if !(sz >= 0 && symNamed(sym, "runtime.memmove") && t.IsPtr() && s1.Uses == 1 && s2.Uses == 1 && s3.Uses == 1 && isInlinableMemmove(dst, src, int64(sz), config) && clobber(s1, s2, s3)) { + if !(sz >= 0 && isSameCall(sym, "runtime.memmove") && t.IsPtr() && s1.Uses == 1 && s2.Uses == 1 && s3.Uses == 1 && isInlinableMemmove(dst, src, int64(sz), config) && clobber(s1, s2, s3)) { break } v.reset(OpMove) @@ -21144,7 +21144,7 @@ func rewriteValuegeneric_OpStaticCall(v *Value) bool { // cond: needRaceCleanup(sym, v) // result: x for { - sym := auxToSym(v.Aux) + sym := auxToCall(v.Aux) x := v_0 if !(needRaceCleanup(sym, v)) { break @@ -21608,7 +21608,7 @@ func rewriteValuegeneric_OpStore(v *Value) bool { return true } // match: (Store (Load (OffPtr [c] (SP)) mem) x mem) - // cond: isConstZero(x) && mem.Op == OpStaticCall && isSameSym(mem.Aux, "runtime.newobject") && c == config.ctxt.FixedFrameSize() + config.RegSize + // cond: isConstZero(x) && mem.Op == OpStaticCall && isSameCall(mem.Aux, "runtime.newobject") && c == config.ctxt.FixedFrameSize() + config.RegSize // result: mem for { if v_0.Op != OpLoad { @@ -21625,14 +21625,14 @@ func rewriteValuegeneric_OpStore(v *Value) bool { break } x := v_1 - if mem != v_2 || !(isConstZero(x) && mem.Op == OpStaticCall && isSameSym(mem.Aux, "runtime.newobject") && c == config.ctxt.FixedFrameSize()+config.RegSize) { + if mem != v_2 || !(isConstZero(x) && mem.Op == OpStaticCall && isSameCall(mem.Aux, "runtime.newobject") && c == config.ctxt.FixedFrameSize()+config.RegSize) { break } v.copyOf(mem) return true } // match: (Store (OffPtr (Load (OffPtr [c] (SP)) mem)) x mem) - // cond: isConstZero(x) && mem.Op == OpStaticCall && isSameSym(mem.Aux, "runtime.newobject") && c == config.ctxt.FixedFrameSize() + config.RegSize + // cond: isConstZero(x) && mem.Op == OpStaticCall && isSameCall(mem.Aux, "runtime.newobject") && c == config.ctxt.FixedFrameSize() + config.RegSize // result: mem for { if v_0.Op != OpOffPtr { @@ -21653,7 +21653,7 @@ func rewriteValuegeneric_OpStore(v *Value) bool { break } x := v_1 - if mem != v_2 || !(isConstZero(x) && mem.Op == OpStaticCall && isSameSym(mem.Aux, "runtime.newobject") && c == config.ctxt.FixedFrameSize()+config.RegSize) { + if mem != v_2 || !(isConstZero(x) && mem.Op == OpStaticCall && isSameCall(mem.Aux, "runtime.newobject") && c == config.ctxt.FixedFrameSize()+config.RegSize) { break } 
v.copyOf(mem) @@ -24337,7 +24337,7 @@ func rewriteValuegeneric_OpZero(v *Value) bool { b := v.Block config := b.Func.Config // match: (Zero (Load (OffPtr [c] (SP)) mem) mem) - // cond: mem.Op == OpStaticCall && isSameSym(mem.Aux, "runtime.newobject") && c == config.ctxt.FixedFrameSize() + config.RegSize + // cond: mem.Op == OpStaticCall && isSameCall(mem.Aux, "runtime.newobject") && c == config.ctxt.FixedFrameSize() + config.RegSize // result: mem for { if v_0.Op != OpLoad { @@ -24350,7 +24350,7 @@ func rewriteValuegeneric_OpZero(v *Value) bool { } c := auxIntToInt64(v_0_0.AuxInt) v_0_0_0 := v_0_0.Args[0] - if v_0_0_0.Op != OpSP || mem != v_1 || !(mem.Op == OpStaticCall && isSameSym(mem.Aux, "runtime.newobject") && c == config.ctxt.FixedFrameSize()+config.RegSize) { + if v_0_0_0.Op != OpSP || mem != v_1 || !(mem.Op == OpStaticCall && isSameCall(mem.Aux, "runtime.newobject") && c == config.ctxt.FixedFrameSize()+config.RegSize) { break } v.copyOf(mem) diff --git a/src/cmd/compile/internal/ssa/value.go b/src/cmd/compile/internal/ssa/value.go index 6692df7921..94b8763d5d 100644 --- a/src/cmd/compile/internal/ssa/value.go +++ b/src/cmd/compile/internal/ssa/value.go @@ -193,11 +193,11 @@ func (v *Value) auxString() string { return fmt.Sprintf(" [%g]", v.AuxFloat()) case auxString: return fmt.Sprintf(" {%q}", v.Aux) - case auxSym, auxTyp: + case auxSym, auxCall, auxTyp: if v.Aux != nil { return fmt.Sprintf(" {%v}", v.Aux) } - case auxSymOff, auxTypSize: + case auxSymOff, auxCallOff, auxTypSize: s := "" if v.Aux != nil { s = fmt.Sprintf(" {%v}", v.Aux) diff --git a/src/cmd/compile/internal/ssa/writebarrier.go b/src/cmd/compile/internal/ssa/writebarrier.go index 214798a1ab..c358406862 100644 --- a/src/cmd/compile/internal/ssa/writebarrier.go +++ b/src/cmd/compile/internal/ssa/writebarrier.go @@ -523,7 +523,7 @@ func wbcall(pos src.XPos, b *Block, fn, typ *obj.LSym, ptr, val, mem, sp, sb *Va off = round(off, config.PtrSize) // issue call - mem = b.NewValue1A(pos, OpStaticCall, types.TypeMem, fn, mem) + mem = b.NewValue1A(pos, OpStaticCall, types.TypeMem, &AuxCall{fn}, mem) mem.AuxInt = off - config.ctxt.FixedFrameSize() return mem } @@ -582,7 +582,7 @@ func IsNewObject(v *Value, mem *Value) bool { if mem.Op != OpStaticCall { return false } - if !isSameSym(mem.Aux, "runtime.newobject") { + if !isSameCall(mem.Aux, "runtime.newobject") { return false } if v.Args[0].Op != OpOffPtr { diff --git a/src/cmd/compile/internal/wasm/ssa.go b/src/cmd/compile/internal/wasm/ssa.go index 7861667b88..a36fbca4e0 100644 --- a/src/cmd/compile/internal/wasm/ssa.go +++ b/src/cmd/compile/internal/wasm/ssa.go @@ -122,7 +122,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) { switch v.Op { case ssa.OpWasmLoweredStaticCall, ssa.OpWasmLoweredClosureCall, ssa.OpWasmLoweredInterCall: s.PrepareCall(v) - if v.Aux == gc.Deferreturn { + if call, ok := v.Aux.(*ssa.AuxCall); ok && call.Fn == gc.Deferreturn { // add a resume point before call to deferreturn so it can be called again via jmpdefer s.Prog(wasm.ARESUMEPOINT) } @@ -130,7 +130,8 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) { getValue64(s, v.Args[1]) setReg(s, wasm.REG_CTXT) } - if sym, ok := v.Aux.(*obj.LSym); ok { + if call, ok := v.Aux.(*ssa.AuxCall); ok && call.Fn != nil { + sym := call.Fn p := s.Prog(obj.ACALL) p.To = obj.Addr{Type: obj.TYPE_MEM, Name: obj.NAME_EXTERN, Sym: sym} p.Pos = v.Pos -- cgit v1.3 From 3c85e995efd0c2adf8578ed27565ad3b427f1a43 Mon Sep 17 00:00:00 2001 From: David Chase Date: Mon, 15 Jun 2020 18:27:02 -0400 Subject: cmd/compile: 
extend ssa.AuxCall to closure and interface calls Also introduce helper methods. Change-Id: I11a744ed002bae0ca9ebabba3206e1c14147e03d Reviewed-on: https://go-review.googlesource.com/c/go/+/239080 Trust: David Chase Run-TryBot: David Chase TryBot-Result: Go Bot Reviewed-by: Cherry Zhang --- src/cmd/compile/internal/gc/ssa.go | 18 +++++----- src/cmd/compile/internal/ssa/gen/386Ops.go | 6 ++-- src/cmd/compile/internal/ssa/gen/AMD64Ops.go | 6 ++-- src/cmd/compile/internal/ssa/gen/ARM64Ops.go | 6 ++-- src/cmd/compile/internal/ssa/gen/ARMOps.go | 6 ++-- src/cmd/compile/internal/ssa/gen/MIPS64Ops.go | 6 ++-- src/cmd/compile/internal/ssa/gen/MIPSOps.go | 6 ++-- src/cmd/compile/internal/ssa/gen/PPC64Ops.go | 6 ++-- src/cmd/compile/internal/ssa/gen/RISCV64Ops.go | 6 ++-- src/cmd/compile/internal/ssa/gen/S390XOps.go | 6 ++-- src/cmd/compile/internal/ssa/gen/WasmOps.go | 8 ++--- src/cmd/compile/internal/ssa/gen/genericOps.go | 6 ++-- src/cmd/compile/internal/ssa/loopreschedchecks.go | 2 +- src/cmd/compile/internal/ssa/op.go | 15 ++++++++ src/cmd/compile/internal/ssa/opGen.go | 44 +++++++++++------------ src/cmd/compile/internal/ssa/rewrite.go | 2 +- src/cmd/compile/internal/ssa/rewritegeneric.go | 2 +- src/cmd/compile/internal/ssa/writebarrier.go | 2 +- 18 files changed, 84 insertions(+), 69 deletions(-) (limited to 'src/cmd/compile/internal/ssa') diff --git a/src/cmd/compile/internal/gc/ssa.go b/src/cmd/compile/internal/gc/ssa.go index 0ee31bd9a3..542b1b51c2 100644 --- a/src/cmd/compile/internal/gc/ssa.go +++ b/src/cmd/compile/internal/gc/ssa.go @@ -4302,10 +4302,10 @@ func (s *state) openDeferExit() { v := s.load(r.closure.Type.Elem(), r.closure) s.maybeNilCheckClosure(v, callDefer) codeptr := s.rawLoad(types.Types[TUINTPTR], v) - call = s.newValue3A(ssa.OpClosureCall, types.TypeMem, nil, codeptr, v, s.mem()) + call = s.newValue3A(ssa.OpClosureCall, types.TypeMem, ssa.ClosureAuxCall(), codeptr, v, s.mem()) } else { // Do a static call if the original call was a static function or method - call = s.newValue1A(ssa.OpStaticCall, types.TypeMem, &ssa.AuxCall{Fn: fn.Sym.Linksym()}, s.mem()) + call = s.newValue1A(ssa.OpStaticCall, types.TypeMem, ssa.StaticAuxCall(fn.Sym.Linksym()), s.mem()) } call.AuxInt = stksize s.vars[&memVar] = call @@ -4437,7 +4437,7 @@ func (s *state) call(n *Node, k callKind) *ssa.Value { // Call runtime.deferprocStack with pointer to _defer record. arg0 := s.constOffPtrSP(types.Types[TUINTPTR], Ctxt.FixedFrameSize()) s.store(types.Types[TUINTPTR], arg0, addr) - call = s.newValue1A(ssa.OpStaticCall, types.TypeMem, &ssa.AuxCall{Fn: deferprocStack}, s.mem()) + call = s.newValue1A(ssa.OpStaticCall, types.TypeMem, ssa.StaticAuxCall(deferprocStack), s.mem()) if stksize < int64(Widthptr) { // We need room for both the call to deferprocStack and the call to // the deferred function. 
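The constructor helpers used in these hunks are added to ssa/op.go by this change (its op.go hunk appears later in the patch). At this point in the series they are presumably thin wrappers over AuxCall; an assumed sketch:

    // Assumed shapes, for illustration only; see ssa/op.go in the full change.
    func StaticAuxCall(sym *obj.LSym) *AuxCall { return &AuxCall{Fn: sym} }
    func ClosureAuxCall() *AuxCall { return &AuxCall{} }   // closure calls have no static target
    func InterfaceAuxCall() *AuxCall { return &AuxCall{} } // likewise for interface calls

Returning a non-nil *AuxCall with a nil Fn matters once CALLclosure and CALLinter also carry a CallOff aux, since the check added in ssa/check.go requires the Aux of such ops to be an *AuxCall and only permits a nil Fn for non-static calls.
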
@@ -4482,9 +4482,9 @@ func (s *state) call(n *Node, k callKind) *ssa.Value { // call target switch { case k == callDefer: - call = s.newValue1A(ssa.OpStaticCall, types.TypeMem, &ssa.AuxCall{Fn: deferproc}, s.mem()) + call = s.newValue1A(ssa.OpStaticCall, types.TypeMem, ssa.StaticAuxCall(deferproc), s.mem()) case k == callGo: - call = s.newValue1A(ssa.OpStaticCall, types.TypeMem, &ssa.AuxCall{Fn: newproc}, s.mem()) + call = s.newValue1A(ssa.OpStaticCall, types.TypeMem, ssa.StaticAuxCall(newproc), s.mem()) case closure != nil: // rawLoad because loading the code pointer from a // closure is always safe, but IsSanitizerSafeAddr @@ -4492,11 +4492,11 @@ func (s *state) call(n *Node, k callKind) *ssa.Value { // critical that we not clobber any arguments already // stored onto the stack. codeptr = s.rawLoad(types.Types[TUINTPTR], closure) - call = s.newValue3A(ssa.OpClosureCall, types.TypeMem, nil, codeptr, closure, s.mem()) + call = s.newValue3A(ssa.OpClosureCall, types.TypeMem, ssa.ClosureAuxCall(), codeptr, closure, s.mem()) case codeptr != nil: - call = s.newValue2A(ssa.OpInterCall, types.TypeMem, nil, codeptr, s.mem()) + call = s.newValue2A(ssa.OpInterCall, types.TypeMem, ssa.InterfaceAuxCall(), codeptr, s.mem()) case sym != nil: - call = s.newValue1A(ssa.OpStaticCall, types.TypeMem, &ssa.AuxCall{Fn: sym.Linksym()}, s.mem()) + call = s.newValue1A(ssa.OpStaticCall, types.TypeMem, ssa.StaticAuxCall(sym.Linksym()), s.mem()) default: s.Fatalf("bad call type %v %v", n.Op, n) } @@ -4929,7 +4929,7 @@ func (s *state) rtcall(fn *obj.LSym, returns bool, results []*types.Type, args . off = Rnd(off, int64(Widthreg)) // Issue call - call := s.newValue1A(ssa.OpStaticCall, types.TypeMem, &ssa.AuxCall{Fn: fn}, s.mem()) + call := s.newValue1A(ssa.OpStaticCall, types.TypeMem, ssa.StaticAuxCall(fn), s.mem()) s.vars[&memVar] = call if !returns { diff --git a/src/cmd/compile/internal/ssa/gen/386Ops.go b/src/cmd/compile/internal/ssa/gen/386Ops.go index 64a17cb7a3..ddabde7d3d 100644 --- a/src/cmd/compile/internal/ssa/gen/386Ops.go +++ b/src/cmd/compile/internal/ssa/gen/386Ops.go @@ -463,9 +463,9 @@ func init() { faultOnNilArg0: true, }, - {name: "CALLstatic", argLength: 1, reg: regInfo{clobbers: callerSave}, aux: "CallOff", clobberFlags: true, call: true}, // call static function aux.(*obj.LSym). arg0=mem, auxint=argsize, returns mem - {name: "CALLclosure", argLength: 3, reg: regInfo{inputs: []regMask{gpsp, buildReg("DX"), 0}, clobbers: callerSave}, aux: "Int64", clobberFlags: true, call: true}, // call function via closure. arg0=codeptr, arg1=closure, arg2=mem, auxint=argsize, returns mem - {name: "CALLinter", argLength: 2, reg: regInfo{inputs: []regMask{gp}, clobbers: callerSave}, aux: "Int64", clobberFlags: true, call: true}, // call fn by pointer. arg0=codeptr, arg1=mem, auxint=argsize, returns mem + {name: "CALLstatic", argLength: 1, reg: regInfo{clobbers: callerSave}, aux: "CallOff", clobberFlags: true, call: true}, // call static function aux.(*obj.LSym). arg0=mem, auxint=argsize, returns mem + {name: "CALLclosure", argLength: 3, reg: regInfo{inputs: []regMask{gpsp, buildReg("DX"), 0}, clobbers: callerSave}, aux: "CallOff", clobberFlags: true, call: true}, // call function via closure. arg0=codeptr, arg1=closure, arg2=mem, auxint=argsize, returns mem + {name: "CALLinter", argLength: 2, reg: regInfo{inputs: []regMask{gp}, clobbers: callerSave}, aux: "CallOff", clobberFlags: true, call: true}, // call fn by pointer. 
arg0=codeptr, arg1=mem, auxint=argsize, returns mem // arg0 = destination pointer // arg1 = source pointer diff --git a/src/cmd/compile/internal/ssa/gen/AMD64Ops.go b/src/cmd/compile/internal/ssa/gen/AMD64Ops.go index d267fe8753..2df5016d59 100644 --- a/src/cmd/compile/internal/ssa/gen/AMD64Ops.go +++ b/src/cmd/compile/internal/ssa/gen/AMD64Ops.go @@ -767,9 +767,9 @@ func init() { faultOnNilArg0: true, }, - {name: "CALLstatic", argLength: 1, reg: regInfo{clobbers: callerSave}, aux: "CallOff", clobberFlags: true, call: true}, // call static function aux.(*obj.LSym). arg0=mem, auxint=argsize, returns mem - {name: "CALLclosure", argLength: 3, reg: regInfo{inputs: []regMask{gpsp, buildReg("DX"), 0}, clobbers: callerSave}, aux: "Int64", clobberFlags: true, call: true}, // call function via closure. arg0=codeptr, arg1=closure, arg2=mem, auxint=argsize, returns mem - {name: "CALLinter", argLength: 2, reg: regInfo{inputs: []regMask{gp}, clobbers: callerSave}, aux: "Int64", clobberFlags: true, call: true}, // call fn by pointer. arg0=codeptr, arg1=mem, auxint=argsize, returns mem + {name: "CALLstatic", argLength: 1, reg: regInfo{clobbers: callerSave}, aux: "CallOff", clobberFlags: true, call: true}, // call static function aux.(*obj.LSym). arg0=mem, auxint=argsize, returns mem + {name: "CALLclosure", argLength: 3, reg: regInfo{inputs: []regMask{gpsp, buildReg("DX"), 0}, clobbers: callerSave}, aux: "CallOff", clobberFlags: true, call: true}, // call function via closure. arg0=codeptr, arg1=closure, arg2=mem, auxint=argsize, returns mem + {name: "CALLinter", argLength: 2, reg: regInfo{inputs: []regMask{gp}, clobbers: callerSave}, aux: "CallOff", clobberFlags: true, call: true}, // call fn by pointer. arg0=codeptr, arg1=mem, auxint=argsize, returns mem // arg0 = destination pointer // arg1 = source pointer diff --git a/src/cmd/compile/internal/ssa/gen/ARM64Ops.go b/src/cmd/compile/internal/ssa/gen/ARM64Ops.go index f52d68dc33..9ff53f7e4e 100644 --- a/src/cmd/compile/internal/ssa/gen/ARM64Ops.go +++ b/src/cmd/compile/internal/ssa/gen/ARM64Ops.go @@ -471,9 +471,9 @@ func init() { {name: "CSEL0", argLength: 2, reg: gp1flags1, asm: "CSEL", aux: "CCop"}, // auxint(flags) ? arg0 : 0 // function calls - {name: "CALLstatic", argLength: 1, reg: regInfo{clobbers: callerSave}, aux: "CallOff", clobberFlags: true, call: true}, // call static function aux.(*obj.LSym). arg0=mem, auxint=argsize, returns mem - {name: "CALLclosure", argLength: 3, reg: regInfo{inputs: []regMask{gpsp, buildReg("R26"), 0}, clobbers: callerSave}, aux: "Int64", clobberFlags: true, call: true}, // call function via closure. arg0=codeptr, arg1=closure, arg2=mem, auxint=argsize, returns mem - {name: "CALLinter", argLength: 2, reg: regInfo{inputs: []regMask{gp}, clobbers: callerSave}, aux: "Int64", clobberFlags: true, call: true}, // call fn by pointer. arg0=codeptr, arg1=mem, auxint=argsize, returns mem + {name: "CALLstatic", argLength: 1, reg: regInfo{clobbers: callerSave}, aux: "CallOff", clobberFlags: true, call: true}, // call static function aux.(*obj.LSym). arg0=mem, auxint=argsize, returns mem + {name: "CALLclosure", argLength: 3, reg: regInfo{inputs: []regMask{gpsp, buildReg("R26"), 0}, clobbers: callerSave}, aux: "CallOff", clobberFlags: true, call: true}, // call function via closure. arg0=codeptr, arg1=closure, arg2=mem, auxint=argsize, returns mem + {name: "CALLinter", argLength: 2, reg: regInfo{inputs: []regMask{gp}, clobbers: callerSave}, aux: "CallOff", clobberFlags: true, call: true}, // call fn by pointer. 
arg0=codeptr, arg1=mem, auxint=argsize, returns mem // pseudo-ops {name: "LoweredNilCheck", argLength: 2, reg: regInfo{inputs: []regMask{gpg}}, nilCheck: true, faultOnNilArg0: true}, // panic if arg0 is nil. arg1=mem. diff --git a/src/cmd/compile/internal/ssa/gen/ARMOps.go b/src/cmd/compile/internal/ssa/gen/ARMOps.go index 1e6b4546da..70c789937a 100644 --- a/src/cmd/compile/internal/ssa/gen/ARMOps.go +++ b/src/cmd/compile/internal/ssa/gen/ARMOps.go @@ -428,9 +428,9 @@ func init() { {name: "SRAcond", argLength: 3, reg: gp2flags1, asm: "SRA"}, // arg0 >> 31 if flags indicates HS, arg0 >> arg1 otherwise, signed shift, arg2=flags // function calls - {name: "CALLstatic", argLength: 1, reg: regInfo{clobbers: callerSave}, aux: "CallOff", clobberFlags: true, call: true}, // call static function aux.(*obj.LSym). arg0=mem, auxint=argsize, returns mem - {name: "CALLclosure", argLength: 3, reg: regInfo{inputs: []regMask{gpsp, buildReg("R7"), 0}, clobbers: callerSave}, aux: "Int64", clobberFlags: true, call: true}, // call function via closure. arg0=codeptr, arg1=closure, arg2=mem, auxint=argsize, returns mem - {name: "CALLinter", argLength: 2, reg: regInfo{inputs: []regMask{gp}, clobbers: callerSave}, aux: "Int64", clobberFlags: true, call: true}, // call fn by pointer. arg0=codeptr, arg1=mem, auxint=argsize, returns mem + {name: "CALLstatic", argLength: 1, reg: regInfo{clobbers: callerSave}, aux: "CallOff", clobberFlags: true, call: true}, // call static function aux.(*obj.LSym). arg0=mem, auxint=argsize, returns mem + {name: "CALLclosure", argLength: 3, reg: regInfo{inputs: []regMask{gpsp, buildReg("R7"), 0}, clobbers: callerSave}, aux: "CallOff", clobberFlags: true, call: true}, // call function via closure. arg0=codeptr, arg1=closure, arg2=mem, auxint=argsize, returns mem + {name: "CALLinter", argLength: 2, reg: regInfo{inputs: []regMask{gp}, clobbers: callerSave}, aux: "CallOff", clobberFlags: true, call: true}, // call fn by pointer. arg0=codeptr, arg1=mem, auxint=argsize, returns mem // pseudo-ops {name: "LoweredNilCheck", argLength: 2, reg: regInfo{inputs: []regMask{gpg}}, nilCheck: true, faultOnNilArg0: true}, // panic if arg0 is nil. arg1=mem. diff --git a/src/cmd/compile/internal/ssa/gen/MIPS64Ops.go b/src/cmd/compile/internal/ssa/gen/MIPS64Ops.go index dc2e9d3ec9..e1e3933502 100644 --- a/src/cmd/compile/internal/ssa/gen/MIPS64Ops.go +++ b/src/cmd/compile/internal/ssa/gen/MIPS64Ops.go @@ -273,9 +273,9 @@ func init() { {name: "MOVDF", argLength: 1, reg: fp11, asm: "MOVDF"}, // float64 -> float32 // function calls - {name: "CALLstatic", argLength: 1, reg: regInfo{clobbers: callerSave}, aux: "CallOff", clobberFlags: true, call: true}, // call static function aux.(*obj.LSym). arg0=mem, auxint=argsize, returns mem - {name: "CALLclosure", argLength: 3, reg: regInfo{inputs: []regMask{gpsp, buildReg("R22"), 0}, clobbers: callerSave}, aux: "Int64", clobberFlags: true, call: true}, // call function via closure. arg0=codeptr, arg1=closure, arg2=mem, auxint=argsize, returns mem - {name: "CALLinter", argLength: 2, reg: regInfo{inputs: []regMask{gp}, clobbers: callerSave}, aux: "Int64", clobberFlags: true, call: true}, // call fn by pointer. arg0=codeptr, arg1=mem, auxint=argsize, returns mem + {name: "CALLstatic", argLength: 1, reg: regInfo{clobbers: callerSave}, aux: "CallOff", clobberFlags: true, call: true}, // call static function aux.(*obj.LSym). 
arg0=mem, auxint=argsize, returns mem + {name: "CALLclosure", argLength: 3, reg: regInfo{inputs: []regMask{gpsp, buildReg("R22"), 0}, clobbers: callerSave}, aux: "CallOff", clobberFlags: true, call: true}, // call function via closure. arg0=codeptr, arg1=closure, arg2=mem, auxint=argsize, returns mem + {name: "CALLinter", argLength: 2, reg: regInfo{inputs: []regMask{gp}, clobbers: callerSave}, aux: "CallOff", clobberFlags: true, call: true}, // call fn by pointer. arg0=codeptr, arg1=mem, auxint=argsize, returns mem // duffzero // arg0 = address of memory to zero diff --git a/src/cmd/compile/internal/ssa/gen/MIPSOps.go b/src/cmd/compile/internal/ssa/gen/MIPSOps.go index c66adcf93a..cd7357f62b 100644 --- a/src/cmd/compile/internal/ssa/gen/MIPSOps.go +++ b/src/cmd/compile/internal/ssa/gen/MIPSOps.go @@ -255,9 +255,9 @@ func init() { {name: "MOVDF", argLength: 1, reg: fp11, asm: "MOVDF"}, // float64 -> float32 // function calls - {name: "CALLstatic", argLength: 1, reg: regInfo{clobbers: callerSave}, aux: "CallOff", clobberFlags: true, call: true}, // call static function aux.(*obj.LSym). arg0=mem, auxint=argsize, returns mem - {name: "CALLclosure", argLength: 3, reg: regInfo{inputs: []regMask{gpsp, buildReg("R22"), 0}, clobbers: callerSave}, aux: "Int64", clobberFlags: true, call: true}, // call function via closure. arg0=codeptr, arg1=closure, arg2=mem, auxint=argsize, returns mem - {name: "CALLinter", argLength: 2, reg: regInfo{inputs: []regMask{gp}, clobbers: callerSave}, aux: "Int64", clobberFlags: true, call: true}, // call fn by pointer. arg0=codeptr, arg1=mem, auxint=argsize, returns mem + {name: "CALLstatic", argLength: 1, reg: regInfo{clobbers: callerSave}, aux: "CallOff", clobberFlags: true, call: true}, // call static function aux.(*obj.LSym). arg0=mem, auxint=argsize, returns mem + {name: "CALLclosure", argLength: 3, reg: regInfo{inputs: []regMask{gpsp, buildReg("R22"), 0}, clobbers: callerSave}, aux: "CallOff", clobberFlags: true, call: true}, // call function via closure. arg0=codeptr, arg1=closure, arg2=mem, auxint=argsize, returns mem + {name: "CALLinter", argLength: 2, reg: regInfo{inputs: []regMask{gp}, clobbers: callerSave}, aux: "CallOff", clobberFlags: true, call: true}, // call fn by pointer. arg0=codeptr, arg1=mem, auxint=argsize, returns mem // atomic ops diff --git a/src/cmd/compile/internal/ssa/gen/PPC64Ops.go b/src/cmd/compile/internal/ssa/gen/PPC64Ops.go index 0c04e561ad..37706b2dd9 100644 --- a/src/cmd/compile/internal/ssa/gen/PPC64Ops.go +++ b/src/cmd/compile/internal/ssa/gen/PPC64Ops.go @@ -414,9 +414,9 @@ func init() { {name: "LoweredRound32F", argLength: 1, reg: fp11, resultInArg0: true, zeroWidth: true}, {name: "LoweredRound64F", argLength: 1, reg: fp11, resultInArg0: true, zeroWidth: true}, - {name: "CALLstatic", argLength: 1, reg: regInfo{clobbers: callerSave}, aux: "CallOff", clobberFlags: true, call: true}, // call static function aux.(*obj.LSym). arg0=mem, auxint=argsize, returns mem - {name: "CALLclosure", argLength: 3, reg: regInfo{inputs: []regMask{callptr, ctxt, 0}, clobbers: callerSave}, aux: "Int64", clobberFlags: true, call: true}, // call function via closure. arg0=codeptr, arg1=closure, arg2=mem, auxint=argsize, returns mem - {name: "CALLinter", argLength: 2, reg: regInfo{inputs: []regMask{callptr}, clobbers: callerSave}, aux: "Int64", clobberFlags: true, call: true}, // call fn by pointer. 
arg0=codeptr, arg1=mem, auxint=argsize, returns mem + {name: "CALLstatic", argLength: 1, reg: regInfo{clobbers: callerSave}, aux: "CallOff", clobberFlags: true, call: true}, // call static function aux.(*obj.LSym). arg0=mem, auxint=argsize, returns mem + {name: "CALLclosure", argLength: 3, reg: regInfo{inputs: []regMask{callptr, ctxt, 0}, clobbers: callerSave}, aux: "CallOff", clobberFlags: true, call: true}, // call function via closure. arg0=codeptr, arg1=closure, arg2=mem, auxint=argsize, returns mem + {name: "CALLinter", argLength: 2, reg: regInfo{inputs: []regMask{callptr}, clobbers: callerSave}, aux: "CallOff", clobberFlags: true, call: true}, // call fn by pointer. arg0=codeptr, arg1=mem, auxint=argsize, returns mem // large or unaligned zeroing // arg0 = address of memory to zero (in R3, changed as side effect) diff --git a/src/cmd/compile/internal/ssa/gen/RISCV64Ops.go b/src/cmd/compile/internal/ssa/gen/RISCV64Ops.go index 17970918e2..b06b86075e 100644 --- a/src/cmd/compile/internal/ssa/gen/RISCV64Ops.go +++ b/src/cmd/compile/internal/ssa/gen/RISCV64Ops.go @@ -224,9 +224,9 @@ func init() { {name: "MOVconvert", argLength: 2, reg: gp11, asm: "MOV"}, // arg0, but converted to int/ptr as appropriate; arg1=mem // Calls - {name: "CALLstatic", argLength: 1, reg: call, aux: "CallOff", call: true}, // call static function aux.(*gc.Sym). arg0=mem, auxint=argsize, returns mem - {name: "CALLclosure", argLength: 3, reg: callClosure, aux: "Int64", call: true}, // call function via closure. arg0=codeptr, arg1=closure, arg2=mem, auxint=argsize, returns mem - {name: "CALLinter", argLength: 2, reg: callInter, aux: "Int64", call: true}, // call fn by pointer. arg0=codeptr, arg1=mem, auxint=argsize, returns mem + {name: "CALLstatic", argLength: 1, reg: call, aux: "CallOff", call: true}, // call static function aux.(*gc.Sym). arg0=mem, auxint=argsize, returns mem + {name: "CALLclosure", argLength: 3, reg: callClosure, aux: "CallOff", call: true}, // call function via closure. arg0=codeptr, arg1=closure, arg2=mem, auxint=argsize, returns mem + {name: "CALLinter", argLength: 2, reg: callInter, aux: "CallOff", call: true}, // call fn by pointer. arg0=codeptr, arg1=mem, auxint=argsize, returns mem // Generic moves and zeros diff --git a/src/cmd/compile/internal/ssa/gen/S390XOps.go b/src/cmd/compile/internal/ssa/gen/S390XOps.go index eede8a654b..417b33cf91 100644 --- a/src/cmd/compile/internal/ssa/gen/S390XOps.go +++ b/src/cmd/compile/internal/ssa/gen/S390XOps.go @@ -475,9 +475,9 @@ func init() { {name: "CLEAR", argLength: 2, reg: regInfo{inputs: []regMask{ptr, 0}}, asm: "CLEAR", aux: "SymValAndOff", typ: "Mem", clobberFlags: true, faultOnNilArg0: true, symEffect: "Write"}, - {name: "CALLstatic", argLength: 1, reg: regInfo{clobbers: callerSave}, aux: "CallOff", clobberFlags: true, call: true}, // call static function aux.(*obj.LSym). arg0=mem, auxint=argsize, returns mem - {name: "CALLclosure", argLength: 3, reg: regInfo{inputs: []regMask{ptrsp, buildReg("R12"), 0}, clobbers: callerSave}, aux: "Int64", clobberFlags: true, call: true}, // call function via closure. arg0=codeptr, arg1=closure, arg2=mem, auxint=argsize, returns mem - {name: "CALLinter", argLength: 2, reg: regInfo{inputs: []regMask{ptr}, clobbers: callerSave}, aux: "Int64", clobberFlags: true, call: true}, // call fn by pointer. arg0=codeptr, arg1=mem, auxint=argsize, returns mem + {name: "CALLstatic", argLength: 1, reg: regInfo{clobbers: callerSave}, aux: "CallOff", clobberFlags: true, call: true}, // call static function aux.(*obj.LSym). 
arg0=mem, auxint=argsize, returns mem + {name: "CALLclosure", argLength: 3, reg: regInfo{inputs: []regMask{ptrsp, buildReg("R12"), 0}, clobbers: callerSave}, aux: "CallOff", clobberFlags: true, call: true}, // call function via closure. arg0=codeptr, arg1=closure, arg2=mem, auxint=argsize, returns mem + {name: "CALLinter", argLength: 2, reg: regInfo{inputs: []regMask{ptr}, clobbers: callerSave}, aux: "CallOff", clobberFlags: true, call: true}, // call fn by pointer. arg0=codeptr, arg1=mem, auxint=argsize, returns mem // (InvertFlags (CMP a b)) == (CMP b a) // InvertFlags is a pseudo-op which can't appear in assembly output. diff --git a/src/cmd/compile/internal/ssa/gen/WasmOps.go b/src/cmd/compile/internal/ssa/gen/WasmOps.go index 3286a68fb0..36c53bc78c 100644 --- a/src/cmd/compile/internal/ssa/gen/WasmOps.go +++ b/src/cmd/compile/internal/ssa/gen/WasmOps.go @@ -122,9 +122,9 @@ func init() { ) var WasmOps = []opData{ - {name: "LoweredStaticCall", argLength: 1, reg: regInfo{clobbers: callerSave}, aux: "CallOff", call: true}, // call static function aux.(*obj.LSym). arg0=mem, auxint=argsize, returns mem - {name: "LoweredClosureCall", argLength: 3, reg: regInfo{inputs: []regMask{gp, gp, 0}, clobbers: callerSave}, aux: "Int64", call: true}, // call function via closure. arg0=codeptr, arg1=closure, arg2=mem, auxint=argsize, returns mem - {name: "LoweredInterCall", argLength: 2, reg: regInfo{inputs: []regMask{gp}, clobbers: callerSave}, aux: "Int64", call: true}, // call fn by pointer. arg0=codeptr, arg1=mem, auxint=argsize, returns mem + {name: "LoweredStaticCall", argLength: 1, reg: regInfo{clobbers: callerSave}, aux: "CallOff", call: true}, // call static function aux.(*obj.LSym). arg0=mem, auxint=argsize, returns mem + {name: "LoweredClosureCall", argLength: 3, reg: regInfo{inputs: []regMask{gp, gp, 0}, clobbers: callerSave}, aux: "CallOff", call: true}, // call function via closure. arg0=codeptr, arg1=closure, arg2=mem, auxint=argsize, returns mem + {name: "LoweredInterCall", argLength: 2, reg: regInfo{inputs: []regMask{gp}, clobbers: callerSave}, aux: "CallOff", call: true}, // call fn by pointer. arg0=codeptr, arg1=mem, auxint=argsize, returns mem {name: "LoweredAddr", argLength: 1, reg: gp11, aux: "SymOff", rematerializeable: true, symEffect: "Addr"}, // returns base+aux+auxint, arg0=base {name: "LoweredMove", argLength: 3, reg: regInfo{inputs: []regMask{gp, gp}}, aux: "Int64"}, // large move. arg0=dst, arg1=src, arg2=mem, auxint=len/8, returns mem @@ -137,7 +137,7 @@ func init() { {name: "LoweredWB", argLength: 3, reg: regInfo{inputs: []regMask{gp, gp}}, aux: "Sym", symEffect: "None"}, // invokes runtime.gcWriteBarrier. arg0=destptr, arg1=srcptr, arg2=mem, aux=runtime.gcWriteBarrier // LoweredConvert converts between pointers and integers. - // We have a special op for this so as to not confuse GC + // We have a special op for this so as to not confuse GCCallOff // (particularly stack maps). It takes a memory arg so it // gets correctly ordered with respect to GC safepoints. // arg0=ptr/int arg1=mem, output=int/ptr diff --git a/src/cmd/compile/internal/ssa/gen/genericOps.go b/src/cmd/compile/internal/ssa/gen/genericOps.go index acfe222089..145ba2d50c 100644 --- a/src/cmd/compile/internal/ssa/gen/genericOps.go +++ b/src/cmd/compile/internal/ssa/gen/genericOps.go @@ -387,9 +387,9 @@ var genericOps = []opData{ // as a phantom first argument. // TODO(josharian): ClosureCall and InterCall should have Int32 aux // to match StaticCall's 32 bit arg size limit. 
- {name: "ClosureCall", argLength: 3, aux: "Int64", call: true}, // arg0=code pointer, arg1=context ptr, arg2=memory. auxint=arg size. Returns memory. - {name: "StaticCall", argLength: 1, aux: "CallOff", call: true}, // call function aux.(*obj.LSym), arg0=memory. auxint=arg size. Returns memory. - {name: "InterCall", argLength: 2, aux: "Int64", call: true}, // interface call. arg0=code pointer, arg1=memory, auxint=arg size. Returns memory. + {name: "ClosureCall", argLength: 3, aux: "CallOff", call: true}, // arg0=code pointer, arg1=context ptr, arg2=memory. auxint=arg size. Returns memory. + {name: "StaticCall", argLength: 1, aux: "CallOff", call: true}, // call function aux.(*obj.LSym), arg0=memory. auxint=arg size. Returns memory. + {name: "InterCall", argLength: 2, aux: "CallOff", call: true}, // interface call. arg0=code pointer, arg1=memory, auxint=arg size. Returns memory. // Conversions: signed extensions, zero (unsigned) extensions, truncations {name: "SignExt8to16", argLength: 1, typ: "Int16"}, diff --git a/src/cmd/compile/internal/ssa/loopreschedchecks.go b/src/cmd/compile/internal/ssa/loopreschedchecks.go index 4a720fdede..ebd23b34c7 100644 --- a/src/cmd/compile/internal/ssa/loopreschedchecks.go +++ b/src/cmd/compile/internal/ssa/loopreschedchecks.go @@ -246,7 +246,7 @@ func insertLoopReschedChecks(f *Func) { // mem1 := call resched (mem0) // goto header resched := f.fe.Syslook("goschedguarded") - mem1 := sched.NewValue1A(bb.Pos, OpStaticCall, types.TypeMem, &AuxCall{resched}, mem0) + mem1 := sched.NewValue1A(bb.Pos, OpStaticCall, types.TypeMem, StaticAuxCall(resched), mem0) sched.AddEdgeTo(h) headerMemPhi.AddArg(mem1) diff --git a/src/cmd/compile/internal/ssa/op.go b/src/cmd/compile/internal/ssa/op.go index 3aa506e3ab..c498a288a1 100644 --- a/src/cmd/compile/internal/ssa/op.go +++ b/src/cmd/compile/internal/ssa/op.go @@ -78,6 +78,21 @@ func (a *AuxCall) String() string { return fmt.Sprintf("AuxCall(%v)", a.Fn) } +// StaticAuxCall returns an AuxCall for a static call. +func StaticAuxCall(sym *obj.LSym) *AuxCall { + return &AuxCall{Fn: sym} +} + +// InterfaceAuxCall returns an AuxCall for an interface call. +func InterfaceAuxCall() *AuxCall { + return &AuxCall{} +} + +// ClosureAuxCall returns an AuxCall for a closure call. 
+func ClosureAuxCall() *AuxCall { + return &AuxCall{} +} + const ( auxNone auxType = iota auxBool // auxInt is 0/1 for false/true diff --git a/src/cmd/compile/internal/ssa/opGen.go b/src/cmd/compile/internal/ssa/opGen.go index 797d82f2d1..d95943231a 100644 --- a/src/cmd/compile/internal/ssa/opGen.go +++ b/src/cmd/compile/internal/ssa/opGen.go @@ -5826,7 +5826,7 @@ var opcodeTable = [...]opInfo{ }, { name: "CALLclosure", - auxType: auxInt64, + auxType: auxCallOff, argLen: 3, clobberFlags: true, call: true, @@ -5840,7 +5840,7 @@ var opcodeTable = [...]opInfo{ }, { name: "CALLinter", - auxType: auxInt64, + auxType: auxCallOff, argLen: 2, clobberFlags: true, call: true, @@ -13161,7 +13161,7 @@ var opcodeTable = [...]opInfo{ }, { name: "CALLclosure", - auxType: auxInt64, + auxType: auxCallOff, argLen: 3, clobberFlags: true, call: true, @@ -13175,7 +13175,7 @@ var opcodeTable = [...]opInfo{ }, { name: "CALLinter", - auxType: auxInt64, + auxType: auxCallOff, argLen: 2, clobberFlags: true, call: true, @@ -16930,7 +16930,7 @@ var opcodeTable = [...]opInfo{ }, { name: "CALLclosure", - auxType: auxInt64, + auxType: auxCallOff, argLen: 3, clobberFlags: true, call: true, @@ -16944,7 +16944,7 @@ var opcodeTable = [...]opInfo{ }, { name: "CALLinter", - auxType: auxInt64, + auxType: auxCallOff, argLen: 2, clobberFlags: true, call: true, @@ -20563,7 +20563,7 @@ var opcodeTable = [...]opInfo{ }, { name: "CALLclosure", - auxType: auxInt64, + auxType: auxCallOff, argLen: 3, clobberFlags: true, call: true, @@ -20577,7 +20577,7 @@ var opcodeTable = [...]opInfo{ }, { name: "CALLinter", - auxType: auxInt64, + auxType: auxCallOff, argLen: 2, clobberFlags: true, call: true, @@ -22263,7 +22263,7 @@ var opcodeTable = [...]opInfo{ }, { name: "CALLclosure", - auxType: auxInt64, + auxType: auxCallOff, argLen: 3, clobberFlags: true, call: true, @@ -22277,7 +22277,7 @@ var opcodeTable = [...]opInfo{ }, { name: "CALLinter", - auxType: auxInt64, + auxType: auxCallOff, argLen: 2, clobberFlags: true, call: true, @@ -23809,7 +23809,7 @@ var opcodeTable = [...]opInfo{ }, { name: "CALLclosure", - auxType: auxInt64, + auxType: auxCallOff, argLen: 3, clobberFlags: true, call: true, @@ -23823,7 +23823,7 @@ var opcodeTable = [...]opInfo{ }, { name: "CALLinter", - auxType: auxInt64, + auxType: auxCallOff, argLen: 2, clobberFlags: true, call: true, @@ -26508,7 +26508,7 @@ var opcodeTable = [...]opInfo{ }, { name: "CALLclosure", - auxType: auxInt64, + auxType: auxCallOff, argLen: 3, clobberFlags: true, call: true, @@ -26522,7 +26522,7 @@ var opcodeTable = [...]opInfo{ }, { name: "CALLinter", - auxType: auxInt64, + auxType: auxCallOff, argLen: 2, clobberFlags: true, call: true, @@ -27790,7 +27790,7 @@ var opcodeTable = [...]opInfo{ }, { name: "CALLclosure", - auxType: auxInt64, + auxType: auxCallOff, argLen: 3, call: true, reg: regInfo{ @@ -27803,7 +27803,7 @@ var opcodeTable = [...]opInfo{ }, { name: "CALLinter", - auxType: auxInt64, + auxType: auxCallOff, argLen: 2, call: true, reg: regInfo{ @@ -31388,7 +31388,7 @@ var opcodeTable = [...]opInfo{ }, { name: "CALLclosure", - auxType: auxInt64, + auxType: auxCallOff, argLen: 3, clobberFlags: true, call: true, @@ -31402,7 +31402,7 @@ var opcodeTable = [...]opInfo{ }, { name: "CALLinter", - auxType: auxInt64, + auxType: auxCallOff, argLen: 2, clobberFlags: true, call: true, @@ -32032,7 +32032,7 @@ var opcodeTable = [...]opInfo{ }, { name: "LoweredClosureCall", - auxType: auxInt64, + auxType: auxCallOff, argLen: 3, call: true, reg: regInfo{ @@ -32045,7 +32045,7 @@ var opcodeTable = 
[...]opInfo{ }, { name: "LoweredInterCall", - auxType: auxInt64, + auxType: auxCallOff, argLen: 2, call: true, reg: regInfo{ @@ -34763,7 +34763,7 @@ var opcodeTable = [...]opInfo{ }, { name: "ClosureCall", - auxType: auxInt64, + auxType: auxCallOff, argLen: 3, call: true, generic: true, @@ -34777,7 +34777,7 @@ var opcodeTable = [...]opInfo{ }, { name: "InterCall", - auxType: auxInt64, + auxType: auxCallOff, argLen: 2, call: true, generic: true, diff --git a/src/cmd/compile/internal/ssa/rewrite.go b/src/cmd/compile/internal/ssa/rewrite.go index 8195d407e0..eb371ce38b 100644 --- a/src/cmd/compile/internal/ssa/rewrite.go +++ b/src/cmd/compile/internal/ssa/rewrite.go @@ -760,7 +760,7 @@ func devirt(v *Value, sym Sym, offset int64) *AuxCall { if lsym == nil { return nil } - return &AuxCall{Fn: lsym} + return StaticAuxCall(lsym) } // isSamePtr reports whether p1 and p2 point to the same address. diff --git a/src/cmd/compile/internal/ssa/rewritegeneric.go b/src/cmd/compile/internal/ssa/rewritegeneric.go index 4b388a68cd..1ecfabf7cb 100644 --- a/src/cmd/compile/internal/ssa/rewritegeneric.go +++ b/src/cmd/compile/internal/ssa/rewritegeneric.go @@ -8483,7 +8483,7 @@ func rewriteValuegeneric_OpInterCall(v *Value) bool { // cond: devirt(v, itab, off) != nil // result: (StaticCall [int32(argsize)] {devirt(v, itab, off)} mem) for { - argsize := auxIntToInt64(v.AuxInt) + argsize := auxIntToInt32(v.AuxInt) if v_0.Op != OpLoad { break } diff --git a/src/cmd/compile/internal/ssa/writebarrier.go b/src/cmd/compile/internal/ssa/writebarrier.go index c358406862..4322a85c90 100644 --- a/src/cmd/compile/internal/ssa/writebarrier.go +++ b/src/cmd/compile/internal/ssa/writebarrier.go @@ -523,7 +523,7 @@ func wbcall(pos src.XPos, b *Block, fn, typ *obj.LSym, ptr, val, mem, sp, sb *Va off = round(off, config.PtrSize) // issue call - mem = b.NewValue1A(pos, OpStaticCall, types.TypeMem, &AuxCall{fn}, mem) + mem = b.NewValue1A(pos, OpStaticCall, types.TypeMem, StaticAuxCall(fn), mem) mem.AuxInt = off - config.ctxt.FixedFrameSize() return mem } -- cgit v1.3 From acde81e0a9c17ea23a6fc545b40bfabc80133f78 Mon Sep 17 00:00:00 2001 From: David Chase Date: Fri, 19 Jun 2020 15:29:51 -0400 Subject: cmd/compile: initialize ACArgs and ACResults AuxCall fields for static and interface calls. 
Extend use of AuxCall Change-Id: I68b6d9bad09506532e1415fd70d44cf6c15b4b93 Reviewed-on: https://go-review.googlesource.com/c/go/+/239081 Trust: David Chase Run-TryBot: David Chase TryBot-Result: Go Bot Reviewed-by: Cherry Zhang --- src/cmd/compile/internal/gc/ssa.go | 53 ++++++++++++++++++----- src/cmd/compile/internal/ssa/gen/generic.rules | 4 +- src/cmd/compile/internal/ssa/loopreschedchecks.go | 2 +- src/cmd/compile/internal/ssa/op.go | 50 ++++++++++++++++++--- src/cmd/compile/internal/ssa/rewrite.go | 5 ++- src/cmd/compile/internal/ssa/rewritegeneric.go | 11 ++--- src/cmd/compile/internal/ssa/writebarrier.go | 6 ++- 7 files changed, 102 insertions(+), 29 deletions(-) (limited to 'src/cmd/compile/internal/ssa') diff --git a/src/cmd/compile/internal/gc/ssa.go b/src/cmd/compile/internal/gc/ssa.go index 542b1b51c2..6c0b027c17 100644 --- a/src/cmd/compile/internal/gc/ssa.go +++ b/src/cmd/compile/internal/gc/ssa.go @@ -4280,16 +4280,20 @@ func (s *state) openDeferExit() { argStart := Ctxt.FixedFrameSize() fn := r.n.Left stksize := fn.Type.ArgWidth() + var ACArgs []ssa.Param + var ACResults []ssa.Param if r.rcvr != nil { // rcvr in case of OCALLINTER v := s.load(r.rcvr.Type.Elem(), r.rcvr) addr := s.constOffPtrSP(s.f.Config.Types.UintptrPtr, argStart) + ACArgs = append(ACArgs, ssa.Param{Type: types.Types[TUINTPTR], Offset: int32(argStart)}) s.store(types.Types[TUINTPTR], addr, v) } for j, argAddrVal := range r.argVals { f := getParam(r.n, j) pt := types.NewPtr(f.Type) addr := s.constOffPtrSP(pt, argStart+f.Offset) + ACArgs = append(ACArgs, ssa.Param{Type: types.Types[TUINTPTR], Offset: int32(argStart + f.Offset)}) if !canSSAType(f.Type) { s.move(f.Type, addr, argAddrVal) } else { @@ -4305,7 +4309,7 @@ func (s *state) openDeferExit() { call = s.newValue3A(ssa.OpClosureCall, types.TypeMem, ssa.ClosureAuxCall(), codeptr, v, s.mem()) } else { // Do a static call if the original call was a static function or method - call = s.newValue1A(ssa.OpStaticCall, types.TypeMem, ssa.StaticAuxCall(fn.Sym.Linksym()), s.mem()) + call = s.newValue1A(ssa.OpStaticCall, types.TypeMem, ssa.StaticAuxCall(fn.Sym.Linksym(), ACArgs, ACResults), s.mem()) } call.AuxInt = stksize s.vars[&memVar] = call @@ -4340,6 +4344,17 @@ func (s *state) call(n *Node, k callKind) *ssa.Value { var codeptr *ssa.Value // ptr to target code (if dynamic) var rcvr *ssa.Value // receiver to set fn := n.Left + var ACArgs []ssa.Param + var ACResults []ssa.Param + res := n.Left.Type.Results() + if k == callNormal { + nf := res.NumFields() + for i := 0; i < nf; i++ { + fp := res.Field(i) + ACResults = append(ACResults, ssa.Param{Type: fp.Type, Offset: int32(fp.Offset + Ctxt.FixedFrameSize())}) + } + } + switch n.Op { case OCALLFUNC: if k == callNormal && fn.Op == ONAME && fn.Class() == PFUNC { @@ -4437,10 +4452,12 @@ func (s *state) call(n *Node, k callKind) *ssa.Value { // Call runtime.deferprocStack with pointer to _defer record. arg0 := s.constOffPtrSP(types.Types[TUINTPTR], Ctxt.FixedFrameSize()) s.store(types.Types[TUINTPTR], arg0, addr) - call = s.newValue1A(ssa.OpStaticCall, types.TypeMem, ssa.StaticAuxCall(deferprocStack), s.mem()) + ACArgs = append(ACArgs, ssa.Param{Type: types.Types[TUINTPTR], Offset: int32(Ctxt.FixedFrameSize())}) + call = s.newValue1A(ssa.OpStaticCall, types.TypeMem, ssa.StaticAuxCall(deferprocStack, ACArgs, ACResults), s.mem()) if stksize < int64(Widthptr) { // We need room for both the call to deferprocStack and the call to // the deferred function. + // TODO Revisit this if/when we pass args in registers. 
stksize = int64(Widthptr) } call.AuxInt = stksize @@ -4453,8 +4470,10 @@ func (s *state) call(n *Node, k callKind) *ssa.Value { // Write argsize and closure (args to newproc/deferproc). argsize := s.constInt32(types.Types[TUINT32], int32(stksize)) addr := s.constOffPtrSP(s.f.Config.Types.UInt32Ptr, argStart) + ACArgs = append(ACArgs, ssa.Param{Type: types.Types[TUINT32], Offset: int32(argStart)}) s.store(types.Types[TUINT32], addr, argsize) addr = s.constOffPtrSP(s.f.Config.Types.UintptrPtr, argStart+int64(Widthptr)) + ACArgs = append(ACArgs, ssa.Param{Type: types.Types[TUINTPTR], Offset: int32(argStart) + int32(Widthptr)}) s.store(types.Types[TUINTPTR], addr, closure) stksize += 2 * int64(Widthptr) argStart += 2 * int64(Widthptr) @@ -4463,6 +4482,7 @@ func (s *state) call(n *Node, k callKind) *ssa.Value { // Set receiver (for interface calls). if rcvr != nil { addr := s.constOffPtrSP(s.f.Config.Types.UintptrPtr, argStart) + ACArgs = append(ACArgs, ssa.Param{Type: types.Types[TUINTPTR], Offset: int32(argStart)}) s.store(types.Types[TUINTPTR], addr, rcvr) } @@ -4471,20 +4491,20 @@ func (s *state) call(n *Node, k callKind) *ssa.Value { args := n.Rlist.Slice() if n.Op == OCALLMETH { f := t.Recv() - s.storeArg(args[0], f.Type, argStart+f.Offset) + ACArgs = append(ACArgs, s.storeArg(args[0], f.Type, argStart+f.Offset)) args = args[1:] } for i, n := range args { f := t.Params().Field(i) - s.storeArg(n, f.Type, argStart+f.Offset) + ACArgs = append(ACArgs, s.storeArg(n, f.Type, argStart+f.Offset)) } // call target switch { case k == callDefer: - call = s.newValue1A(ssa.OpStaticCall, types.TypeMem, ssa.StaticAuxCall(deferproc), s.mem()) + call = s.newValue1A(ssa.OpStaticCall, types.TypeMem, ssa.StaticAuxCall(deferproc, ACArgs, ACResults), s.mem()) case k == callGo: - call = s.newValue1A(ssa.OpStaticCall, types.TypeMem, ssa.StaticAuxCall(newproc), s.mem()) + call = s.newValue1A(ssa.OpStaticCall, types.TypeMem, ssa.StaticAuxCall(newproc, ACArgs, ACResults), s.mem()) case closure != nil: // rawLoad because loading the code pointer from a // closure is always safe, but IsSanitizerSafeAddr @@ -4494,9 +4514,9 @@ func (s *state) call(n *Node, k callKind) *ssa.Value { codeptr = s.rawLoad(types.Types[TUINTPTR], closure) call = s.newValue3A(ssa.OpClosureCall, types.TypeMem, ssa.ClosureAuxCall(), codeptr, closure, s.mem()) case codeptr != nil: - call = s.newValue2A(ssa.OpInterCall, types.TypeMem, ssa.InterfaceAuxCall(), codeptr, s.mem()) + call = s.newValue2A(ssa.OpInterCall, types.TypeMem, ssa.InterfaceAuxCall(ACArgs, ACResults), codeptr, s.mem()) case sym != nil: - call = s.newValue1A(ssa.OpStaticCall, types.TypeMem, ssa.StaticAuxCall(sym.Linksym()), s.mem()) + call = s.newValue1A(ssa.OpStaticCall, types.TypeMem, ssa.StaticAuxCall(sym.Linksym(), ACArgs, ACResults), s.mem()) default: s.Fatalf("bad call type %v %v", n.Op, n) } @@ -4522,7 +4542,6 @@ func (s *state) call(n *Node, k callKind) *ssa.Value { s.startBlock(bNext) } - res := n.Left.Type.Results() if res.NumFields() == 0 || k != callNormal { // call has no return value. Continue with the next statement. 
return nil @@ -4918,18 +4937,29 @@ func (s *state) intDivide(n *Node, a, b *ssa.Value) *ssa.Value { func (s *state) rtcall(fn *obj.LSym, returns bool, results []*types.Type, args ...*ssa.Value) []*ssa.Value { // Write args to the stack off := Ctxt.FixedFrameSize() + var ACArgs []ssa.Param + var ACResults []ssa.Param for _, arg := range args { t := arg.Type off = Rnd(off, t.Alignment()) ptr := s.constOffPtrSP(t.PtrTo(), off) size := t.Size() + ACArgs = append(ACArgs, ssa.Param{Type: t, Offset: int32(off)}) s.store(t, ptr, arg) off += size } off = Rnd(off, int64(Widthreg)) + // Accumulate results types and offsets + offR := off + for _, t := range results { + offR = Rnd(offR, t.Alignment()) + ACResults = append(ACResults, ssa.Param{Type: t, Offset: int32(offR)}) + offR += t.Size() + } + // Issue call - call := s.newValue1A(ssa.OpStaticCall, types.TypeMem, ssa.StaticAuxCall(fn), s.mem()) + call := s.newValue1A(ssa.OpStaticCall, types.TypeMem, ssa.StaticAuxCall(fn, ACArgs, ACResults), s.mem()) s.vars[&memVar] = call if !returns { @@ -5064,8 +5094,9 @@ func (s *state) storeTypePtrs(t *types.Type, left, right *ssa.Value) { } } -func (s *state) storeArg(n *Node, t *types.Type, off int64) { +func (s *state) storeArg(n *Node, t *types.Type, off int64) ssa.Param { s.storeArgWithBase(n, t, s.sp, off) + return ssa.Param{Type: t, Offset: int32(off)} } func (s *state) storeArgWithBase(n *Node, t *types.Type, base *ssa.Value, off int64) { diff --git a/src/cmd/compile/internal/ssa/gen/generic.rules b/src/cmd/compile/internal/ssa/gen/generic.rules index df70838aa9..39f8cc8889 100644 --- a/src/cmd/compile/internal/ssa/gen/generic.rules +++ b/src/cmd/compile/internal/ssa/gen/generic.rules @@ -2021,8 +2021,8 @@ // Note that (ITab (IMake)) doesn't get // rewritten until after the first opt pass, // so this rule should trigger reliably. -(InterCall [argsize] (Load (OffPtr [off] (ITab (IMake (Addr {itab} (SB)) _))) _) mem) && devirt(v, itab, off) != nil => - (StaticCall [int32(argsize)] {devirt(v, itab, off)} mem) +(InterCall [argsize] {auxCall} (Load (OffPtr [off] (ITab (IMake (Addr {itab} (SB)) _))) _) mem) && devirt(v, auxCall, itab, off) != nil => + (StaticCall [int32(argsize)] {devirt(v, auxCall, itab, off)} mem) // Move and Zero optimizations. // Move source and destination may overlap. diff --git a/src/cmd/compile/internal/ssa/loopreschedchecks.go b/src/cmd/compile/internal/ssa/loopreschedchecks.go index ebd23b34c7..9c73bcff26 100644 --- a/src/cmd/compile/internal/ssa/loopreschedchecks.go +++ b/src/cmd/compile/internal/ssa/loopreschedchecks.go @@ -246,7 +246,7 @@ func insertLoopReschedChecks(f *Func) { // mem1 := call resched (mem0) // goto header resched := f.fe.Syslook("goschedguarded") - mem1 := sched.NewValue1A(bb.Pos, OpStaticCall, types.TypeMem, StaticAuxCall(resched), mem0) + mem1 := sched.NewValue1A(bb.Pos, OpStaticCall, types.TypeMem, StaticAuxCall(resched, nil, nil), mem0) sched.AddEdgeTo(h) headerMemPhi.AddArg(mem1) diff --git a/src/cmd/compile/internal/ssa/op.go b/src/cmd/compile/internal/ssa/op.go index c498a288a1..f94399028a 100644 --- a/src/cmd/compile/internal/ssa/op.go +++ b/src/cmd/compile/internal/ssa/op.go @@ -5,6 +5,7 @@ package ssa import ( + "cmd/compile/internal/types" "cmd/internal/obj" "fmt" ) @@ -67,25 +68,60 @@ type regInfo struct { type auxType int8 +type Param struct { + Type *types.Type + Offset int32 // TODO someday this will be a register +} + type AuxCall struct { - Fn *obj.LSym + Fn *obj.LSym + args []Param // Includes receiver for method calls. 
Does NOT include hidden closure pointer. + results []Param } func (a *AuxCall) String() string { + var fn string if a.Fn == nil { - return "AuxCall(nil)" + fn = "AuxCall{nil" // could be interface/closure etc. + } else { + fn = fmt.Sprintf("AuxCall{%v", a.Fn) + } + + if len(a.args) == 0 { + fn += "()" + } else { + s := "(" + for _, arg := range a.args { + fn += fmt.Sprintf("%s[%v,%v]", s, arg.Type, arg.Offset) + s = "," + } + fn += ")" } - return fmt.Sprintf("AuxCall(%v)", a.Fn) + + if len(a.results) > 0 { // usual is zero or one; only some RT calls have more than one. + if len(a.results) == 1 { + fn += fmt.Sprintf("[%v,%v]", a.results[0].Type, a.results[0].Offset) + } else { + s := "(" + for _, result := range a.results { + fn += fmt.Sprintf("%s[%v,%v]", s, result.Type, result.Offset) + s = "," + } + fn += ")" + } + } + + return fn + "}" } // StaticAuxCall returns an AuxCall for a static call. -func StaticAuxCall(sym *obj.LSym) *AuxCall { - return &AuxCall{Fn: sym} +func StaticAuxCall(sym *obj.LSym, args []Param, results []Param) *AuxCall { + return &AuxCall{Fn: sym, args: args, results: results} } // InterfaceAuxCall returns an AuxCall for an interface call. -func InterfaceAuxCall() *AuxCall { - return &AuxCall{} +func InterfaceAuxCall(args []Param, results []Param) *AuxCall { + return &AuxCall{Fn: nil, args: args, results: results} } // ClosureAuxCall returns an AuxCall for a closure call. diff --git a/src/cmd/compile/internal/ssa/rewrite.go b/src/cmd/compile/internal/ssa/rewrite.go index eb371ce38b..2ab310ad85 100644 --- a/src/cmd/compile/internal/ssa/rewrite.go +++ b/src/cmd/compile/internal/ssa/rewrite.go @@ -743,7 +743,7 @@ func uaddOvf(a, b int64) bool { // de-virtualize an InterCall // 'sym' is the symbol for the itab -func devirt(v *Value, sym Sym, offset int64) *AuxCall { +func devirt(v *Value, aux interface{}, sym Sym, offset int64) *AuxCall { f := v.Block.Func n, ok := sym.(*obj.LSym) if !ok { @@ -760,7 +760,8 @@ func devirt(v *Value, sym Sym, offset int64) *AuxCall { if lsym == nil { return nil } - return StaticAuxCall(lsym) + va := aux.(*AuxCall) + return StaticAuxCall(lsym, va.args, va.results) } // isSamePtr reports whether p1 and p2 point to the same address. 
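A note on the new aux text (illustrative only; the symbol, types and offsets below are made up, not taken from this change): because an AuxCall now carries its argument and result Params, the String method above renders the stack layout of a call. A static call to a function f that takes an int64 at offset 8 and a pointer at offset 16 and returns a bool at offset 24 would appear in ssa debug output roughly as

    AuxCall{f([int64,8],[*uint8,16])[bool,24]}

with one [type,offset] pair per argument, the parenthesized group for the arguments, and a trailing pair (or group, if there are several) for the results.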
diff --git a/src/cmd/compile/internal/ssa/rewritegeneric.go b/src/cmd/compile/internal/ssa/rewritegeneric.go index 1ecfabf7cb..925ff53fd1 100644 --- a/src/cmd/compile/internal/ssa/rewritegeneric.go +++ b/src/cmd/compile/internal/ssa/rewritegeneric.go @@ -8479,11 +8479,12 @@ func rewriteValuegeneric_OpIMake(v *Value) bool { func rewriteValuegeneric_OpInterCall(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - // match: (InterCall [argsize] (Load (OffPtr [off] (ITab (IMake (Addr {itab} (SB)) _))) _) mem) - // cond: devirt(v, itab, off) != nil - // result: (StaticCall [int32(argsize)] {devirt(v, itab, off)} mem) + // match: (InterCall [argsize] {auxCall} (Load (OffPtr [off] (ITab (IMake (Addr {itab} (SB)) _))) _) mem) + // cond: devirt(v, auxCall, itab, off) != nil + // result: (StaticCall [int32(argsize)] {devirt(v, auxCall, itab, off)} mem) for { argsize := auxIntToInt32(v.AuxInt) + auxCall := auxToCall(v.Aux) if v_0.Op != OpLoad { break } @@ -8510,12 +8511,12 @@ func rewriteValuegeneric_OpInterCall(v *Value) bool { break } mem := v_1 - if !(devirt(v, itab, off) != nil) { + if !(devirt(v, auxCall, itab, off) != nil) { break } v.reset(OpStaticCall) v.AuxInt = int32ToAuxInt(int32(argsize)) - v.Aux = callToAux(devirt(v, itab, off)) + v.Aux = callToAux(devirt(v, auxCall, itab, off)) v.AddArg(mem) return true } diff --git a/src/cmd/compile/internal/ssa/writebarrier.go b/src/cmd/compile/internal/ssa/writebarrier.go index 4322a85c90..7cc8bf7af9 100644 --- a/src/cmd/compile/internal/ssa/writebarrier.go +++ b/src/cmd/compile/internal/ssa/writebarrier.go @@ -501,29 +501,33 @@ func wbcall(pos src.XPos, b *Block, fn, typ *obj.LSym, ptr, val, mem, sp, sb *Va // put arguments on stack off := config.ctxt.FixedFrameSize() + var ACArgs []Param if typ != nil { // for typedmemmove taddr := b.NewValue1A(pos, OpAddr, b.Func.Config.Types.Uintptr, typ, sb) off = round(off, taddr.Type.Alignment()) arg := b.NewValue1I(pos, OpOffPtr, taddr.Type.PtrTo(), off, sp) mem = b.NewValue3A(pos, OpStore, types.TypeMem, ptr.Type, arg, taddr, mem) + ACArgs = append(ACArgs, Param{Type: b.Func.Config.Types.Uintptr, Offset: int32(off)}) off += taddr.Type.Size() } off = round(off, ptr.Type.Alignment()) arg := b.NewValue1I(pos, OpOffPtr, ptr.Type.PtrTo(), off, sp) mem = b.NewValue3A(pos, OpStore, types.TypeMem, ptr.Type, arg, ptr, mem) + ACArgs = append(ACArgs, Param{Type: ptr.Type, Offset: int32(off)}) off += ptr.Type.Size() if val != nil { off = round(off, val.Type.Alignment()) arg = b.NewValue1I(pos, OpOffPtr, val.Type.PtrTo(), off, sp) mem = b.NewValue3A(pos, OpStore, types.TypeMem, val.Type, arg, val, mem) + ACArgs = append(ACArgs, Param{Type: val.Type, Offset: int32(off)}) off += val.Type.Size() } off = round(off, config.PtrSize) // issue call - mem = b.NewValue1A(pos, OpStaticCall, types.TypeMem, StaticAuxCall(fn), mem) + mem = b.NewValue1A(pos, OpStaticCall, types.TypeMem, StaticAuxCall(fn, ACArgs, nil), mem) mem.AuxInt = off - config.ctxt.FixedFrameSize() return mem } -- cgit v1.3 From 39da81da5e35e50da74ebb8c4fe12fd363bf41b9 Mon Sep 17 00:00:00 2001 From: David Chase Date: Wed, 24 Jun 2020 17:00:48 -0400 Subject: cmd/compile: populate AuxCall fields for OpClosureCall Change-Id: Ib5f62826d5249c1727b57d9f8ff2f3a1d6dc5032 Reviewed-on: https://go-review.googlesource.com/c/go/+/240185 Trust: David Chase Run-TryBot: David Chase TryBot-Result: Go Bot Reviewed-by: Cherry Zhang --- src/cmd/compile/internal/gc/ssa.go | 4 ++-- src/cmd/compile/internal/ssa/op.go | 8 ++++++-- 2 files changed, 8 insertions(+), 4 deletions(-) (limited 
to 'src/cmd/compile/internal/ssa') diff --git a/src/cmd/compile/internal/gc/ssa.go b/src/cmd/compile/internal/gc/ssa.go index 6c0b027c17..75fdbbae04 100644 --- a/src/cmd/compile/internal/gc/ssa.go +++ b/src/cmd/compile/internal/gc/ssa.go @@ -4306,7 +4306,7 @@ func (s *state) openDeferExit() { v := s.load(r.closure.Type.Elem(), r.closure) s.maybeNilCheckClosure(v, callDefer) codeptr := s.rawLoad(types.Types[TUINTPTR], v) - call = s.newValue3A(ssa.OpClosureCall, types.TypeMem, ssa.ClosureAuxCall(), codeptr, v, s.mem()) + call = s.newValue3A(ssa.OpClosureCall, types.TypeMem, ssa.ClosureAuxCall(ACArgs, ACResults), codeptr, v, s.mem()) } else { // Do a static call if the original call was a static function or method call = s.newValue1A(ssa.OpStaticCall, types.TypeMem, ssa.StaticAuxCall(fn.Sym.Linksym(), ACArgs, ACResults), s.mem()) @@ -4512,7 +4512,7 @@ func (s *state) call(n *Node, k callKind) *ssa.Value { // critical that we not clobber any arguments already // stored onto the stack. codeptr = s.rawLoad(types.Types[TUINTPTR], closure) - call = s.newValue3A(ssa.OpClosureCall, types.TypeMem, ssa.ClosureAuxCall(), codeptr, closure, s.mem()) + call = s.newValue3A(ssa.OpClosureCall, types.TypeMem, ssa.ClosureAuxCall(ACArgs, ACResults), codeptr, closure, s.mem()) case codeptr != nil: call = s.newValue2A(ssa.OpInterCall, types.TypeMem, ssa.InterfaceAuxCall(ACArgs, ACResults), codeptr, s.mem()) case sym != nil: diff --git a/src/cmd/compile/internal/ssa/op.go b/src/cmd/compile/internal/ssa/op.go index f94399028a..02ecdef5e6 100644 --- a/src/cmd/compile/internal/ssa/op.go +++ b/src/cmd/compile/internal/ssa/op.go @@ -79,6 +79,10 @@ type AuxCall struct { results []Param } +// String returns +// "AuxCall{()}" if len(results) == 0; +// "AuxCall{()}" if len(results) == 1; +// "AuxCall{()()}" otherwise. func (a *AuxCall) String() string { var fn string if a.Fn == nil { @@ -125,8 +129,8 @@ func InterfaceAuxCall(args []Param, results []Param) *AuxCall { } // ClosureAuxCall returns an AuxCall for a closure call. -func ClosureAuxCall() *AuxCall { - return &AuxCall{} +func ClosureAuxCall(args []Param, results []Param) *AuxCall { + return &AuxCall{Fn: nil, args: args, results: results} } const ( -- cgit v1.3 From 967465da2975fe4322080703ce5a77ea90752829 Mon Sep 17 00:00:00 2001 From: Lynn Boger Date: Mon, 31 Aug 2020 09:43:40 -0400 Subject: cmd/compile: use combined shifts to improve array addressing on ppc64x MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This change adds rules to find pairs of instructions that can be combined into a single shifts. These instruction sequences are common in array addressing within loops. Improvements can be seen in many crypto packages and the hash packages. These are based on the extended mnemonics found in the ISA sections C.8.1 and C.8.2. Some rules in PPC64.rules were moved because the ordering prevented some matching. The following results were generated on power9. 
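An illustration of the pattern the new rules target (a sketch, not code from this change; the names and sizes are made up): an array element load whose index is masked or narrowed and then scaled by the element size, as in

    func sum(t *[32]uint64, idx []uint8) uint64 {
        var s uint64
        for _, i := range idx {
            // Once the bounds check is proved away, the i&31 mask and the
            // <<3 scaling to the uint64 element size can fold into a single
            // CLRLSLDI instead of a separate mask and shift.
            s += t[i&31]
        }
        return s
    }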
hash/crc32: CRC32/poly=Koopman/size=40/align=0 195ns ± 0% 163ns ± 0% -16.41% CRC32/poly=Koopman/size=40/align=1 200ns ± 0% 163ns ± 0% -18.50% CRC32/poly=Koopman/size=512/align=0 1.98µs ± 0% 1.67µs ± 0% -15.46% CRC32/poly=Koopman/size=512/align=1 1.98µs ± 0% 1.69µs ± 0% -14.80% CRC32/poly=Koopman/size=1kB/align=0 3.90µs ± 0% 3.31µs ± 0% -15.27% CRC32/poly=Koopman/size=1kB/align=1 3.85µs ± 0% 3.31µs ± 0% -14.15% CRC32/poly=Koopman/size=4kB/align=0 15.3µs ± 0% 13.1µs ± 0% -14.22% CRC32/poly=Koopman/size=4kB/align=1 15.4µs ± 0% 13.1µs ± 0% -14.79% CRC32/poly=Koopman/size=32kB/align=0 137µs ± 0% 105µs ± 0% -23.56% CRC32/poly=Koopman/size=32kB/align=1 137µs ± 0% 105µs ± 0% -23.53% crypto/rc4: RC4_128 733ns ± 0% 650ns ± 0% -11.32% (p=1.000 n=1+1) RC4_1K 5.80µs ± 0% 5.17µs ± 0% -10.89% (p=1.000 n=1+1) RC4_8K 45.7µs ± 0% 40.8µs ± 0% -10.73% (p=1.000 n=1+1) crypto/sha1: Hash8Bytes 635ns ± 0% 613ns ± 0% -3.46% (p=1.000 n=1+1) Hash320Bytes 2.30µs ± 0% 2.18µs ± 0% -5.38% (p=1.000 n=1+1) Hash1K 5.88µs ± 0% 5.38µs ± 0% -8.62% (p=1.000 n=1+1) Hash8K 42.0µs ± 0% 37.9µs ± 0% -9.75% (p=1.000 n=1+1) There are other improvements found in golang.org/x/crypto which are all in the range of 5-15%. Change-Id: I193471fbcf674151ffe2edab212799d9b08dfb8c Reviewed-on: https://go-review.googlesource.com/c/go/+/252097 Trust: Lynn Boger Run-TryBot: Lynn Boger TryBot-Result: Go Bot Reviewed-by: Carlos Eduardo Seo --- src/cmd/asm/internal/asm/testdata/ppc64enc.s | 4 + src/cmd/compile/internal/ppc64/ssa.go | 36 ++ src/cmd/compile/internal/ssa/gen/PPC64.rules | 63 ++- src/cmd/compile/internal/ssa/gen/PPC64Ops.go | 5 + src/cmd/compile/internal/ssa/opGen.go | 45 ++ src/cmd/compile/internal/ssa/rewrite.go | 38 ++ src/cmd/compile/internal/ssa/rewritePPC64.go | 787 +++++++++++++++++++++++++++ src/cmd/internal/obj/ppc64/a.out.go | 4 + src/cmd/internal/obj/ppc64/anames.go | 4 + src/cmd/internal/obj/ppc64/asm9.go | 74 ++- test/codegen/shift.go | 55 ++ 11 files changed, 1087 insertions(+), 28 deletions(-) (limited to 'src/cmd/compile/internal/ssa') diff --git a/src/cmd/asm/internal/asm/testdata/ppc64enc.s b/src/cmd/asm/internal/asm/testdata/ppc64enc.s index 10a05ec402..e26f6f8933 100644 --- a/src/cmd/asm/internal/asm/testdata/ppc64enc.s +++ b/src/cmd/asm/internal/asm/testdata/ppc64enc.s @@ -284,6 +284,10 @@ TEXT asmtest(SB),DUPOK|NOSPLIT,$0 RLDICLCC $0, R4, $15, R6 // 788603c1 RLDICR $0, R4, $15, R6 // 788603c4 RLDICRCC $0, R4, $15, R6 // 788603c5 + RLDIC $0, R4, $15, R6 // 788603c8 + RLDICCC $0, R4, $15, R6 // 788603c9 + CLRLSLWI $16, R5, $8, R4 // 54a4861e + CLRLSLDI $2, R4, $24, R3 // 78831588 BEQ 0(PC) // 41820000 BGE 0(PC) // 40800000 diff --git a/src/cmd/compile/internal/ppc64/ssa.go b/src/cmd/compile/internal/ppc64/ssa.go index f8d9ac2379..4a83a0bdd7 100644 --- a/src/cmd/compile/internal/ppc64/ssa.go +++ b/src/cmd/compile/internal/ppc64/ssa.go @@ -565,6 +565,42 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) { p = s.Prog(obj.ANOP) gc.Patch(pbover, p) + case ssa.OpPPC64CLRLSLWI: + r := v.Reg() + r1 := v.Args[0].Reg() + shifts := v.AuxInt + p := s.Prog(v.Op.Asm()) + // clrlslwi ra,rs,sh,mb will become rlwinm ra,rs,sh,mb-sh,31-n as described in ISA + p.From = obj.Addr{Type: obj.TYPE_CONST, Offset: ssa.GetPPC64Shiftsh(shifts)} + p.SetFrom3(obj.Addr{Type: obj.TYPE_CONST, Offset: ssa.GetPPC64Shiftmb(shifts)}) + p.Reg = r1 + p.To.Type = obj.TYPE_REG + p.To.Reg = r + + case ssa.OpPPC64CLRLSLDI: + r := v.Reg() + r1 := v.Args[0].Reg() + shifts := v.AuxInt + p := s.Prog(v.Op.Asm()) + // clrlsldi ra,rs,sh,mb will become rldic 
ra,rs,sh,mb-sh + p.From = obj.Addr{Type: obj.TYPE_CONST, Offset: ssa.GetPPC64Shiftsh(shifts)} + p.SetFrom3(obj.Addr{Type: obj.TYPE_CONST, Offset: ssa.GetPPC64Shiftmb(shifts)}) + p.Reg = r1 + p.To.Type = obj.TYPE_REG + p.To.Reg = r + + // Mask has been set as sh + case ssa.OpPPC64RLDICL: + r := v.Reg() + r1 := v.Args[0].Reg() + shifts := v.AuxInt + p := s.Prog(v.Op.Asm()) + p.From = obj.Addr{Type: obj.TYPE_CONST, Offset: ssa.GetPPC64Shiftsh(shifts)} + p.SetFrom3(obj.Addr{Type: obj.TYPE_CONST, Offset: ssa.GetPPC64Shiftmb(shifts)}) + p.Reg = r1 + p.To.Type = obj.TYPE_REG + p.To.Reg = r + case ssa.OpPPC64ADD, ssa.OpPPC64FADD, ssa.OpPPC64FADDS, ssa.OpPPC64SUB, ssa.OpPPC64FSUB, ssa.OpPPC64FSUBS, ssa.OpPPC64MULLD, ssa.OpPPC64MULLW, ssa.OpPPC64DIVDU, ssa.OpPPC64DIVWU, ssa.OpPPC64SRAD, ssa.OpPPC64SRAW, ssa.OpPPC64SRD, ssa.OpPPC64SRW, ssa.OpPPC64SLD, ssa.OpPPC64SLW, diff --git a/src/cmd/compile/internal/ssa/gen/PPC64.rules b/src/cmd/compile/internal/ssa/gen/PPC64.rules index e5fb1e98c2..774d5096de 100644 --- a/src/cmd/compile/internal/ssa/gen/PPC64.rules +++ b/src/cmd/compile/internal/ssa/gen/PPC64.rules @@ -79,6 +79,23 @@ (Abs ...) => (FABS ...) (FMA ...) => (FMADD ...) +// Lowering extension +// Note: we always extend to 64 bits even though some ops don't need that many result bits. +(SignExt8to(16|32|64) ...) => (MOVBreg ...) +(SignExt16to(32|64) ...) => (MOVHreg ...) +(SignExt32to64 ...) => (MOVWreg ...) + +(ZeroExt8to(16|32|64) ...) => (MOVBZreg ...) +(ZeroExt16to(32|64) ...) => (MOVHZreg ...) +(ZeroExt32to64 ...) => (MOVWZreg ...) + +(Trunc(16|32|64)to8 x) && isSigned(t) => (MOVBreg x) +(Trunc(16|32|64)to8 x) => (MOVBZreg x) +(Trunc(32|64)to16 x) && isSigned(t) => (MOVHreg x) +(Trunc(32|64)to16 x) => (MOVHZreg x) +(Trunc64to32 x) && isSigned(t) => (MOVWreg x) +(Trunc64to32 x) => (MOVWZreg x) + // Lowering constants (Const(64|32|16|8) [val]) => (MOVDconst [int64(val)]) (Const(32|64)F ...) => (FMOV(S|D)const ...) @@ -780,6 +797,21 @@ (MOVWreg y:(MOVWZreg x)) => (MOVWreg x) (MOVWZreg y:(MOVWreg x)) => (MOVWZreg x) +// Truncate then logical then truncate: omit first, lesser or equal truncate +(MOVWZreg ((OR|XOR|AND) x (MOVWZreg y))) => (MOVWZreg ((OR|XOR|AND) x y)) +(MOVHZreg ((OR|XOR|AND) x (MOVWZreg y))) => (MOVHZreg ((OR|XOR|AND) x y)) +(MOVHZreg ((OR|XOR|AND) x (MOVHZreg y))) => (MOVHZreg ((OR|XOR|AND) x y)) +(MOVBZreg ((OR|XOR|AND) x (MOVWZreg y))) => (MOVBZreg ((OR|XOR|AND) x y)) +(MOVBZreg ((OR|XOR|AND) x (MOVHZreg y))) => (MOVBZreg ((OR|XOR|AND) x y)) +(MOVBZreg ((OR|XOR|AND) x (MOVBZreg y))) => (MOVBZreg ((OR|XOR|AND) x y)) + +(MOV(B|H|W)Zreg z:(ANDconst [c] (MOVBZload ptr x))) => z +(MOVBZreg z:(AND y (MOVBZload ptr x))) => z +(MOV(H|W)Zreg z:(ANDconst [c] (MOVHZload ptr x))) => z +(MOVHZreg z:(AND y (MOVHZload ptr x))) => z +(MOVWZreg z:(ANDconst [c] (MOVWZload ptr x))) => z +(MOVWZreg z:(AND y (MOVWZload ptr x))) => z + // Arithmetic constant ops (ADD x (MOVDconst [c])) && is32Bit(c) => (ADDconst [c] x) @@ -949,23 +981,6 @@ (AtomicAnd8 ...) => (LoweredAtomicAnd8 ...) (AtomicOr8 ...) => (LoweredAtomicOr8 ...) -// Lowering extension -// Note: we always extend to 64 bits even though some ops don't need that many result bits. -(SignExt8to(16|32|64) ...) => (MOVBreg ...) -(SignExt16to(32|64) ...) => (MOVHreg ...) -(SignExt32to64 ...) => (MOVWreg ...) - -(ZeroExt8to(16|32|64) ...) => (MOVBZreg ...) -(ZeroExt16to(32|64) ...) => (MOVHZreg ...) -(ZeroExt32to64 ...) => (MOVWZreg ...) 
- -(Trunc(16|32|64)to8 x) && isSigned(t) => (MOVBreg x) -(Trunc(16|32|64)to8 x) => (MOVBZreg x) -(Trunc(32|64)to16 x) && isSigned(t) => (MOVHreg x) -(Trunc(32|64)to16 x) => (MOVHZreg x) -(Trunc64to32 x) && isSigned(t) => (MOVWreg x) -(Trunc64to32 x) => (MOVWZreg x) - (Slicemask x) => (SRADconst (NEG x) [63]) // Note that MOV??reg returns a 64-bit int, x is not necessarily that wide @@ -996,6 +1011,20 @@ (MOVWreg (MOVDconst [c])) => (MOVDconst [int64(int32(c))]) (MOVWZreg (MOVDconst [c])) => (MOVDconst [int64(uint32(c))]) +// Implement clrsldi and clrslwi extended mnemonics as described in +// ISA 3.0 section C.8. AuxInt field contains values needed for +// the instructions, packed together since there is only one available. +(SLDconst [c] z:(MOVBZreg x)) && c < 8 && z.Uses == 1 => (CLRLSLDI [newPPC64ShiftAuxInt(c,56,63,64)] x) +(SLDconst [c] z:(MOVHZreg x)) && c < 16 && z.Uses == 1 => (CLRLSLDI [newPPC64ShiftAuxInt(c,48,63,64)] x) +(SLDconst [c] z:(MOVWZreg x)) && c < 32 && z.Uses == 1 => (CLRLSLDI [newPPC64ShiftAuxInt(c,32,63,64)] x) + +(SLDconst [c] z:(ANDconst [d] x)) && z.Uses == 1 && isPPC64ValidShiftMask(d) => (CLRLSLDI [newPPC64ShiftAuxInt(c,64-getPPC64ShiftMaskLength(d),63,64)] x) +(SLDconst [c] z:(AND (MOVDconst [d]) x)) && z.Uses == 1 && isPPC64ValidShiftMask(d) => (CLRLSLDI [newPPC64ShiftAuxInt(c,64-getPPC64ShiftMaskLength(d),63,64)] x) +(SLWconst [c] z:(MOVBZreg x)) && z.Uses == 1 && c < 8 => (CLRLSLWI [newPPC64ShiftAuxInt(c,24,31,32)] x) +(SLWconst [c] z:(MOVHZreg x)) && z.Uses == 1 && c < 16 => (CLRLSLWI [newPPC64ShiftAuxInt(c,16,31,32)] x) +(SLWconst [c] z:(MOVWZreg x)) && z.Uses == 1 && c < 24 => (CLRLSLWI [newPPC64ShiftAuxInt(c,8,31,32)] x) +(SLWconst [c] z:(ANDconst [d] x)) && z.Uses == 1 && isPPC64ValidShiftMask(d) => (CLRLSLWI [newPPC64ShiftAuxInt(c,32-getPPC64ShiftMaskLength(d),31,32)] x) +(SLWconst [c] z:(AND (MOVDconst [d]) x)) && z.Uses == 1 && isPPC64ValidShiftMask(d) => (CLRLSLWI [newPPC64ShiftAuxInt(c,32-getPPC64ShiftMaskLength(d),31,32)] x) // Lose widening ops fed to stores (MOVBstore [off] {sym} ptr (MOV(B|BZ|H|HZ|W|WZ)reg x) mem) => (MOVBstore [off] {sym} ptr x mem) diff --git a/src/cmd/compile/internal/ssa/gen/PPC64Ops.go b/src/cmd/compile/internal/ssa/gen/PPC64Ops.go index 37706b2dd9..ed99c40cd2 100644 --- a/src/cmd/compile/internal/ssa/gen/PPC64Ops.go +++ b/src/cmd/compile/internal/ssa/gen/PPC64Ops.go @@ -206,6 +206,11 @@ func init() { {name: "ROTL", argLength: 2, reg: gp21, asm: "ROTL"}, // arg0 rotate left by arg1 mod 64 {name: "ROTLW", argLength: 2, reg: gp21, asm: "ROTLW"}, // uint32(arg0) rotate left by arg1 mod 32 + // The following are ops to implement the extended mnemonics for shifts as described in section C.8 of the ISA. + // The constant shift values are packed into the aux int32. 
+ {name: "RLDICL", argLength: 1, reg: gp11, asm: "RLDICL", aux: "Int32"}, // arg0 extract bits identified by shift params" + {name: "CLRLSLWI", argLength: 1, reg: gp11, asm: "CLRLSLWI", aux: "Int32"}, // + {name: "CLRLSLDI", argLength: 1, reg: gp11, asm: "CLRLSLDI", aux: "Int32"}, // {name: "LoweredAdd64Carry", argLength: 3, reg: gp32, resultNotInArgs: true}, // arg0 + arg1 + carry, returns (sum, carry) diff --git a/src/cmd/compile/internal/ssa/opGen.go b/src/cmd/compile/internal/ssa/opGen.go index d95943231a..f00dc3f7f5 100644 --- a/src/cmd/compile/internal/ssa/opGen.go +++ b/src/cmd/compile/internal/ssa/opGen.go @@ -1853,6 +1853,9 @@ const ( OpPPC64SLW OpPPC64ROTL OpPPC64ROTLW + OpPPC64RLDICL + OpPPC64CLRLSLWI + OpPPC64CLRLSLDI OpPPC64LoweredAdd64Carry OpPPC64SRADconst OpPPC64SRAWconst @@ -24672,6 +24675,48 @@ var opcodeTable = [...]opInfo{ }, }, }, + { + name: "RLDICL", + auxType: auxInt32, + argLen: 1, + asm: ppc64.ARLDICL, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, + outputs: []outputInfo{ + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, + }, + }, + { + name: "CLRLSLWI", + auxType: auxInt32, + argLen: 1, + asm: ppc64.ACLRLSLWI, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, + outputs: []outputInfo{ + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, + }, + }, + { + name: "CLRLSLDI", + auxType: auxInt32, + argLen: 1, + asm: ppc64.ACLRLSLDI, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, + outputs: []outputInfo{ + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, + }, + }, { name: "LoweredAdd64Carry", argLen: 3, diff --git a/src/cmd/compile/internal/ssa/rewrite.go b/src/cmd/compile/internal/ssa/rewrite.go index 2ab310ad85..d9c3e455a0 100644 --- a/src/cmd/compile/internal/ssa/rewrite.go +++ b/src/cmd/compile/internal/ssa/rewrite.go @@ -1325,6 +1325,44 @@ func hasSmallRotate(c *Config) bool { } } +func newPPC64ShiftAuxInt(sh, mb, me, sz int64) int32 { + if sh < 0 || sh >= sz { + panic("PPC64 shift arg sh out of range") + } + if mb < 0 || mb >= sz { + panic("PPC64 shift arg mb out of range") + } + if me < 0 || me >= sz { + panic("PPC64 shift arg me out of range") + } + return int32(sh<<16 | mb<<8 | me) +} + +func GetPPC64Shiftsh(auxint int64) int64 { + return int64(int8(auxint >> 16)) +} + +func GetPPC64Shiftmb(auxint int64) int64 { + return int64(int8(auxint >> 8)) +} + +func GetPPC64Shiftme(auxint int64) int64 { + return int64(int8(auxint)) +} + +// Catch the simple ones first +// TODO: Later catch more cases +func isPPC64ValidShiftMask(v int64) bool { + if ((v + 1) & v) == 0 { + return true + } + return false +} + +func getPPC64ShiftMaskLength(v int64) int64 { + return int64(bits.Len64(uint64(v))) +} + // encodes the lsb and width for arm(64) bitfield ops into the expected auxInt format. 
func armBFAuxInt(lsb, width int64) arm64BitField { if lsb < 0 || lsb > 63 { diff --git a/src/cmd/compile/internal/ssa/rewritePPC64.go b/src/cmd/compile/internal/ssa/rewritePPC64.go index 152cdfdf4d..12b08824b5 100644 --- a/src/cmd/compile/internal/ssa/rewritePPC64.go +++ b/src/cmd/compile/internal/ssa/rewritePPC64.go @@ -586,8 +586,12 @@ func rewriteValuePPC64(v *Value) bool { return rewriteValuePPC64_OpPPC64ROTLW(v) case OpPPC64SLD: return rewriteValuePPC64_OpPPC64SLD(v) + case OpPPC64SLDconst: + return rewriteValuePPC64_OpPPC64SLDconst(v) case OpPPC64SLW: return rewriteValuePPC64_OpPPC64SLW(v) + case OpPPC64SLWconst: + return rewriteValuePPC64_OpPPC64SLWconst(v) case OpPPC64SRAD: return rewriteValuePPC64_OpPPC64SRAD(v) case OpPPC64SRAW: @@ -6565,6 +6569,255 @@ func rewriteValuePPC64_OpPPC64MOVBZreg(v *Value) bool { v.AddArg(x) return true } + // match: (MOVBZreg (OR x (MOVWZreg y))) + // result: (MOVBZreg (OR x y)) + for { + if v_0.Op != OpPPC64OR { + break + } + t := v_0.Type + _ = v_0.Args[1] + v_0_0 := v_0.Args[0] + v_0_1 := v_0.Args[1] + for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 { + x := v_0_0 + if v_0_1.Op != OpPPC64MOVWZreg { + continue + } + y := v_0_1.Args[0] + v.reset(OpPPC64MOVBZreg) + v0 := b.NewValue0(v.Pos, OpPPC64OR, t) + v0.AddArg2(x, y) + v.AddArg(v0) + return true + } + break + } + // match: (MOVBZreg (XOR x (MOVWZreg y))) + // result: (MOVBZreg (XOR x y)) + for { + if v_0.Op != OpPPC64XOR { + break + } + t := v_0.Type + _ = v_0.Args[1] + v_0_0 := v_0.Args[0] + v_0_1 := v_0.Args[1] + for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 { + x := v_0_0 + if v_0_1.Op != OpPPC64MOVWZreg { + continue + } + y := v_0_1.Args[0] + v.reset(OpPPC64MOVBZreg) + v0 := b.NewValue0(v.Pos, OpPPC64XOR, t) + v0.AddArg2(x, y) + v.AddArg(v0) + return true + } + break + } + // match: (MOVBZreg (AND x (MOVWZreg y))) + // result: (MOVBZreg (AND x y)) + for { + if v_0.Op != OpPPC64AND { + break + } + t := v_0.Type + _ = v_0.Args[1] + v_0_0 := v_0.Args[0] + v_0_1 := v_0.Args[1] + for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 { + x := v_0_0 + if v_0_1.Op != OpPPC64MOVWZreg { + continue + } + y := v_0_1.Args[0] + v.reset(OpPPC64MOVBZreg) + v0 := b.NewValue0(v.Pos, OpPPC64AND, t) + v0.AddArg2(x, y) + v.AddArg(v0) + return true + } + break + } + // match: (MOVBZreg (OR x (MOVHZreg y))) + // result: (MOVBZreg (OR x y)) + for { + if v_0.Op != OpPPC64OR { + break + } + t := v_0.Type + _ = v_0.Args[1] + v_0_0 := v_0.Args[0] + v_0_1 := v_0.Args[1] + for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 { + x := v_0_0 + if v_0_1.Op != OpPPC64MOVHZreg { + continue + } + y := v_0_1.Args[0] + v.reset(OpPPC64MOVBZreg) + v0 := b.NewValue0(v.Pos, OpPPC64OR, t) + v0.AddArg2(x, y) + v.AddArg(v0) + return true + } + break + } + // match: (MOVBZreg (XOR x (MOVHZreg y))) + // result: (MOVBZreg (XOR x y)) + for { + if v_0.Op != OpPPC64XOR { + break + } + t := v_0.Type + _ = v_0.Args[1] + v_0_0 := v_0.Args[0] + v_0_1 := v_0.Args[1] + for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 { + x := v_0_0 + if v_0_1.Op != OpPPC64MOVHZreg { + continue + } + y := v_0_1.Args[0] + v.reset(OpPPC64MOVBZreg) + v0 := b.NewValue0(v.Pos, OpPPC64XOR, t) + v0.AddArg2(x, y) + v.AddArg(v0) + return true + } + break + } + // match: (MOVBZreg (AND x (MOVHZreg y))) + // result: (MOVBZreg (AND x y)) + for { + if v_0.Op != OpPPC64AND { + break + } + t := v_0.Type + _ = v_0.Args[1] + v_0_0 := v_0.Args[0] + v_0_1 := v_0.Args[1] + for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = 
_i0+1, v_0_1, v_0_0 { + x := v_0_0 + if v_0_1.Op != OpPPC64MOVHZreg { + continue + } + y := v_0_1.Args[0] + v.reset(OpPPC64MOVBZreg) + v0 := b.NewValue0(v.Pos, OpPPC64AND, t) + v0.AddArg2(x, y) + v.AddArg(v0) + return true + } + break + } + // match: (MOVBZreg (OR x (MOVBZreg y))) + // result: (MOVBZreg (OR x y)) + for { + if v_0.Op != OpPPC64OR { + break + } + t := v_0.Type + _ = v_0.Args[1] + v_0_0 := v_0.Args[0] + v_0_1 := v_0.Args[1] + for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 { + x := v_0_0 + if v_0_1.Op != OpPPC64MOVBZreg { + continue + } + y := v_0_1.Args[0] + v.reset(OpPPC64MOVBZreg) + v0 := b.NewValue0(v.Pos, OpPPC64OR, t) + v0.AddArg2(x, y) + v.AddArg(v0) + return true + } + break + } + // match: (MOVBZreg (XOR x (MOVBZreg y))) + // result: (MOVBZreg (XOR x y)) + for { + if v_0.Op != OpPPC64XOR { + break + } + t := v_0.Type + _ = v_0.Args[1] + v_0_0 := v_0.Args[0] + v_0_1 := v_0.Args[1] + for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 { + x := v_0_0 + if v_0_1.Op != OpPPC64MOVBZreg { + continue + } + y := v_0_1.Args[0] + v.reset(OpPPC64MOVBZreg) + v0 := b.NewValue0(v.Pos, OpPPC64XOR, t) + v0.AddArg2(x, y) + v.AddArg(v0) + return true + } + break + } + // match: (MOVBZreg (AND x (MOVBZreg y))) + // result: (MOVBZreg (AND x y)) + for { + if v_0.Op != OpPPC64AND { + break + } + t := v_0.Type + _ = v_0.Args[1] + v_0_0 := v_0.Args[0] + v_0_1 := v_0.Args[1] + for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 { + x := v_0_0 + if v_0_1.Op != OpPPC64MOVBZreg { + continue + } + y := v_0_1.Args[0] + v.reset(OpPPC64MOVBZreg) + v0 := b.NewValue0(v.Pos, OpPPC64AND, t) + v0.AddArg2(x, y) + v.AddArg(v0) + return true + } + break + } + // match: (MOVBZreg z:(ANDconst [c] (MOVBZload ptr x))) + // result: z + for { + z := v_0 + if z.Op != OpPPC64ANDconst { + break + } + z_0 := z.Args[0] + if z_0.Op != OpPPC64MOVBZload { + break + } + v.copyOf(z) + return true + } + // match: (MOVBZreg z:(AND y (MOVBZload ptr x))) + // result: z + for { + z := v_0 + if z.Op != OpPPC64AND { + break + } + _ = z.Args[1] + z_0 := z.Args[0] + z_1 := z.Args[1] + for _i0 := 0; _i0 <= 1; _i0, z_0, z_1 = _i0+1, z_1, z_0 { + if z_1.Op != OpPPC64MOVBZload { + continue + } + v.copyOf(z) + return true + } + break + } // match: (MOVBZreg x:(MOVBZload _ _)) // result: x for { @@ -8507,6 +8760,197 @@ func rewriteValuePPC64_OpPPC64MOVHZreg(v *Value) bool { v.AddArg(x) return true } + // match: (MOVHZreg (OR x (MOVWZreg y))) + // result: (MOVHZreg (OR x y)) + for { + if v_0.Op != OpPPC64OR { + break + } + t := v_0.Type + _ = v_0.Args[1] + v_0_0 := v_0.Args[0] + v_0_1 := v_0.Args[1] + for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 { + x := v_0_0 + if v_0_1.Op != OpPPC64MOVWZreg { + continue + } + y := v_0_1.Args[0] + v.reset(OpPPC64MOVHZreg) + v0 := b.NewValue0(v.Pos, OpPPC64OR, t) + v0.AddArg2(x, y) + v.AddArg(v0) + return true + } + break + } + // match: (MOVHZreg (XOR x (MOVWZreg y))) + // result: (MOVHZreg (XOR x y)) + for { + if v_0.Op != OpPPC64XOR { + break + } + t := v_0.Type + _ = v_0.Args[1] + v_0_0 := v_0.Args[0] + v_0_1 := v_0.Args[1] + for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 { + x := v_0_0 + if v_0_1.Op != OpPPC64MOVWZreg { + continue + } + y := v_0_1.Args[0] + v.reset(OpPPC64MOVHZreg) + v0 := b.NewValue0(v.Pos, OpPPC64XOR, t) + v0.AddArg2(x, y) + v.AddArg(v0) + return true + } + break + } + // match: (MOVHZreg (AND x (MOVWZreg y))) + // result: (MOVHZreg (AND x y)) + for { + if v_0.Op != OpPPC64AND { + break + } + t := v_0.Type 
+ _ = v_0.Args[1] + v_0_0 := v_0.Args[0] + v_0_1 := v_0.Args[1] + for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 { + x := v_0_0 + if v_0_1.Op != OpPPC64MOVWZreg { + continue + } + y := v_0_1.Args[0] + v.reset(OpPPC64MOVHZreg) + v0 := b.NewValue0(v.Pos, OpPPC64AND, t) + v0.AddArg2(x, y) + v.AddArg(v0) + return true + } + break + } + // match: (MOVHZreg (OR x (MOVHZreg y))) + // result: (MOVHZreg (OR x y)) + for { + if v_0.Op != OpPPC64OR { + break + } + t := v_0.Type + _ = v_0.Args[1] + v_0_0 := v_0.Args[0] + v_0_1 := v_0.Args[1] + for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 { + x := v_0_0 + if v_0_1.Op != OpPPC64MOVHZreg { + continue + } + y := v_0_1.Args[0] + v.reset(OpPPC64MOVHZreg) + v0 := b.NewValue0(v.Pos, OpPPC64OR, t) + v0.AddArg2(x, y) + v.AddArg(v0) + return true + } + break + } + // match: (MOVHZreg (XOR x (MOVHZreg y))) + // result: (MOVHZreg (XOR x y)) + for { + if v_0.Op != OpPPC64XOR { + break + } + t := v_0.Type + _ = v_0.Args[1] + v_0_0 := v_0.Args[0] + v_0_1 := v_0.Args[1] + for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 { + x := v_0_0 + if v_0_1.Op != OpPPC64MOVHZreg { + continue + } + y := v_0_1.Args[0] + v.reset(OpPPC64MOVHZreg) + v0 := b.NewValue0(v.Pos, OpPPC64XOR, t) + v0.AddArg2(x, y) + v.AddArg(v0) + return true + } + break + } + // match: (MOVHZreg (AND x (MOVHZreg y))) + // result: (MOVHZreg (AND x y)) + for { + if v_0.Op != OpPPC64AND { + break + } + t := v_0.Type + _ = v_0.Args[1] + v_0_0 := v_0.Args[0] + v_0_1 := v_0.Args[1] + for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 { + x := v_0_0 + if v_0_1.Op != OpPPC64MOVHZreg { + continue + } + y := v_0_1.Args[0] + v.reset(OpPPC64MOVHZreg) + v0 := b.NewValue0(v.Pos, OpPPC64AND, t) + v0.AddArg2(x, y) + v.AddArg(v0) + return true + } + break + } + // match: (MOVHZreg z:(ANDconst [c] (MOVBZload ptr x))) + // result: z + for { + z := v_0 + if z.Op != OpPPC64ANDconst { + break + } + z_0 := z.Args[0] + if z_0.Op != OpPPC64MOVBZload { + break + } + v.copyOf(z) + return true + } + // match: (MOVHZreg z:(ANDconst [c] (MOVHZload ptr x))) + // result: z + for { + z := v_0 + if z.Op != OpPPC64ANDconst { + break + } + z_0 := z.Args[0] + if z_0.Op != OpPPC64MOVHZload { + break + } + v.copyOf(z) + return true + } + // match: (MOVHZreg z:(AND y (MOVHZload ptr x))) + // result: z + for { + z := v_0 + if z.Op != OpPPC64AND { + break + } + _ = z.Args[1] + z_0 := z.Args[0] + z_1 := z.Args[1] + for _i0 := 0; _i0 <= 1; _i0, z_0, z_1 = _i0+1, z_1, z_0 { + if z_1.Op != OpPPC64MOVHZload { + continue + } + v.copyOf(z) + return true + } + break + } // match: (MOVHZreg x:(MOVBZload _ _)) // result: x for { @@ -9657,6 +10101,139 @@ func rewriteValuePPC64_OpPPC64MOVWZreg(v *Value) bool { v.AddArg(x) return true } + // match: (MOVWZreg (OR x (MOVWZreg y))) + // result: (MOVWZreg (OR x y)) + for { + if v_0.Op != OpPPC64OR { + break + } + t := v_0.Type + _ = v_0.Args[1] + v_0_0 := v_0.Args[0] + v_0_1 := v_0.Args[1] + for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 { + x := v_0_0 + if v_0_1.Op != OpPPC64MOVWZreg { + continue + } + y := v_0_1.Args[0] + v.reset(OpPPC64MOVWZreg) + v0 := b.NewValue0(v.Pos, OpPPC64OR, t) + v0.AddArg2(x, y) + v.AddArg(v0) + return true + } + break + } + // match: (MOVWZreg (XOR x (MOVWZreg y))) + // result: (MOVWZreg (XOR x y)) + for { + if v_0.Op != OpPPC64XOR { + break + } + t := v_0.Type + _ = v_0.Args[1] + v_0_0 := v_0.Args[0] + v_0_1 := v_0.Args[1] + for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 { + x := v_0_0 + 
if v_0_1.Op != OpPPC64MOVWZreg { + continue + } + y := v_0_1.Args[0] + v.reset(OpPPC64MOVWZreg) + v0 := b.NewValue0(v.Pos, OpPPC64XOR, t) + v0.AddArg2(x, y) + v.AddArg(v0) + return true + } + break + } + // match: (MOVWZreg (AND x (MOVWZreg y))) + // result: (MOVWZreg (AND x y)) + for { + if v_0.Op != OpPPC64AND { + break + } + t := v_0.Type + _ = v_0.Args[1] + v_0_0 := v_0.Args[0] + v_0_1 := v_0.Args[1] + for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 { + x := v_0_0 + if v_0_1.Op != OpPPC64MOVWZreg { + continue + } + y := v_0_1.Args[0] + v.reset(OpPPC64MOVWZreg) + v0 := b.NewValue0(v.Pos, OpPPC64AND, t) + v0.AddArg2(x, y) + v.AddArg(v0) + return true + } + break + } + // match: (MOVWZreg z:(ANDconst [c] (MOVBZload ptr x))) + // result: z + for { + z := v_0 + if z.Op != OpPPC64ANDconst { + break + } + z_0 := z.Args[0] + if z_0.Op != OpPPC64MOVBZload { + break + } + v.copyOf(z) + return true + } + // match: (MOVWZreg z:(ANDconst [c] (MOVHZload ptr x))) + // result: z + for { + z := v_0 + if z.Op != OpPPC64ANDconst { + break + } + z_0 := z.Args[0] + if z_0.Op != OpPPC64MOVHZload { + break + } + v.copyOf(z) + return true + } + // match: (MOVWZreg z:(ANDconst [c] (MOVWZload ptr x))) + // result: z + for { + z := v_0 + if z.Op != OpPPC64ANDconst { + break + } + z_0 := z.Args[0] + if z_0.Op != OpPPC64MOVWZload { + break + } + v.copyOf(z) + return true + } + // match: (MOVWZreg z:(AND y (MOVWZload ptr x))) + // result: z + for { + z := v_0 + if z.Op != OpPPC64AND { + break + } + _ = z.Args[1] + z_0 := z.Args[0] + z_1 := z.Args[1] + for _i0 := 0; _i0 <= 1; _i0, z_0, z_1 = _i0+1, z_1, z_0 { + if z_1.Op != OpPPC64MOVWZload { + continue + } + v.copyOf(z) + return true + } + break + } // match: (MOVWZreg x:(MOVBZload _ _)) // result: x for { @@ -12197,6 +12774,111 @@ func rewriteValuePPC64_OpPPC64SLD(v *Value) bool { } return false } +func rewriteValuePPC64_OpPPC64SLDconst(v *Value) bool { + v_0 := v.Args[0] + // match: (SLDconst [c] z:(MOVBZreg x)) + // cond: c < 8 && z.Uses == 1 + // result: (CLRLSLDI [newPPC64ShiftAuxInt(c,56,63,64)] x) + for { + c := auxIntToInt64(v.AuxInt) + z := v_0 + if z.Op != OpPPC64MOVBZreg { + break + } + x := z.Args[0] + if !(c < 8 && z.Uses == 1) { + break + } + v.reset(OpPPC64CLRLSLDI) + v.AuxInt = int32ToAuxInt(newPPC64ShiftAuxInt(c, 56, 63, 64)) + v.AddArg(x) + return true + } + // match: (SLDconst [c] z:(MOVHZreg x)) + // cond: c < 16 && z.Uses == 1 + // result: (CLRLSLDI [newPPC64ShiftAuxInt(c,48,63,64)] x) + for { + c := auxIntToInt64(v.AuxInt) + z := v_0 + if z.Op != OpPPC64MOVHZreg { + break + } + x := z.Args[0] + if !(c < 16 && z.Uses == 1) { + break + } + v.reset(OpPPC64CLRLSLDI) + v.AuxInt = int32ToAuxInt(newPPC64ShiftAuxInt(c, 48, 63, 64)) + v.AddArg(x) + return true + } + // match: (SLDconst [c] z:(MOVWZreg x)) + // cond: c < 32 && z.Uses == 1 + // result: (CLRLSLDI [newPPC64ShiftAuxInt(c,32,63,64)] x) + for { + c := auxIntToInt64(v.AuxInt) + z := v_0 + if z.Op != OpPPC64MOVWZreg { + break + } + x := z.Args[0] + if !(c < 32 && z.Uses == 1) { + break + } + v.reset(OpPPC64CLRLSLDI) + v.AuxInt = int32ToAuxInt(newPPC64ShiftAuxInt(c, 32, 63, 64)) + v.AddArg(x) + return true + } + // match: (SLDconst [c] z:(ANDconst [d] x)) + // cond: z.Uses == 1 && isPPC64ValidShiftMask(d) + // result: (CLRLSLDI [newPPC64ShiftAuxInt(c,64-getPPC64ShiftMaskLength(d),63,64)] x) + for { + c := auxIntToInt64(v.AuxInt) + z := v_0 + if z.Op != OpPPC64ANDconst { + break + } + d := auxIntToInt64(z.AuxInt) + x := z.Args[0] + if !(z.Uses == 1 && isPPC64ValidShiftMask(d)) { + 
break + } + v.reset(OpPPC64CLRLSLDI) + v.AuxInt = int32ToAuxInt(newPPC64ShiftAuxInt(c, 64-getPPC64ShiftMaskLength(d), 63, 64)) + v.AddArg(x) + return true + } + // match: (SLDconst [c] z:(AND (MOVDconst [d]) x)) + // cond: z.Uses == 1 && isPPC64ValidShiftMask(d) + // result: (CLRLSLDI [newPPC64ShiftAuxInt(c,64-getPPC64ShiftMaskLength(d),63,64)] x) + for { + c := auxIntToInt64(v.AuxInt) + z := v_0 + if z.Op != OpPPC64AND { + break + } + _ = z.Args[1] + z_0 := z.Args[0] + z_1 := z.Args[1] + for _i0 := 0; _i0 <= 1; _i0, z_0, z_1 = _i0+1, z_1, z_0 { + if z_0.Op != OpPPC64MOVDconst { + continue + } + d := auxIntToInt64(z_0.AuxInt) + x := z_1 + if !(z.Uses == 1 && isPPC64ValidShiftMask(d)) { + continue + } + v.reset(OpPPC64CLRLSLDI) + v.AuxInt = int32ToAuxInt(newPPC64ShiftAuxInt(c, 64-getPPC64ShiftMaskLength(d), 63, 64)) + v.AddArg(x) + return true + } + break + } + return false +} func rewriteValuePPC64_OpPPC64SLW(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] @@ -12215,6 +12897,111 @@ func rewriteValuePPC64_OpPPC64SLW(v *Value) bool { } return false } +func rewriteValuePPC64_OpPPC64SLWconst(v *Value) bool { + v_0 := v.Args[0] + // match: (SLWconst [c] z:(MOVBZreg x)) + // cond: z.Uses == 1 && c < 8 + // result: (CLRLSLWI [newPPC64ShiftAuxInt(c,24,31,32)] x) + for { + c := auxIntToInt64(v.AuxInt) + z := v_0 + if z.Op != OpPPC64MOVBZreg { + break + } + x := z.Args[0] + if !(z.Uses == 1 && c < 8) { + break + } + v.reset(OpPPC64CLRLSLWI) + v.AuxInt = int32ToAuxInt(newPPC64ShiftAuxInt(c, 24, 31, 32)) + v.AddArg(x) + return true + } + // match: (SLWconst [c] z:(MOVHZreg x)) + // cond: z.Uses == 1 && c < 16 + // result: (CLRLSLWI [newPPC64ShiftAuxInt(c,16,31,32)] x) + for { + c := auxIntToInt64(v.AuxInt) + z := v_0 + if z.Op != OpPPC64MOVHZreg { + break + } + x := z.Args[0] + if !(z.Uses == 1 && c < 16) { + break + } + v.reset(OpPPC64CLRLSLWI) + v.AuxInt = int32ToAuxInt(newPPC64ShiftAuxInt(c, 16, 31, 32)) + v.AddArg(x) + return true + } + // match: (SLWconst [c] z:(MOVWZreg x)) + // cond: z.Uses == 1 && c < 24 + // result: (CLRLSLWI [newPPC64ShiftAuxInt(c,8,31,32)] x) + for { + c := auxIntToInt64(v.AuxInt) + z := v_0 + if z.Op != OpPPC64MOVWZreg { + break + } + x := z.Args[0] + if !(z.Uses == 1 && c < 24) { + break + } + v.reset(OpPPC64CLRLSLWI) + v.AuxInt = int32ToAuxInt(newPPC64ShiftAuxInt(c, 8, 31, 32)) + v.AddArg(x) + return true + } + // match: (SLWconst [c] z:(ANDconst [d] x)) + // cond: z.Uses == 1 && isPPC64ValidShiftMask(d) + // result: (CLRLSLWI [newPPC64ShiftAuxInt(c,32-getPPC64ShiftMaskLength(d),31,32)] x) + for { + c := auxIntToInt64(v.AuxInt) + z := v_0 + if z.Op != OpPPC64ANDconst { + break + } + d := auxIntToInt64(z.AuxInt) + x := z.Args[0] + if !(z.Uses == 1 && isPPC64ValidShiftMask(d)) { + break + } + v.reset(OpPPC64CLRLSLWI) + v.AuxInt = int32ToAuxInt(newPPC64ShiftAuxInt(c, 32-getPPC64ShiftMaskLength(d), 31, 32)) + v.AddArg(x) + return true + } + // match: (SLWconst [c] z:(AND (MOVDconst [d]) x)) + // cond: z.Uses == 1 && isPPC64ValidShiftMask(d) + // result: (CLRLSLWI [newPPC64ShiftAuxInt(c,32-getPPC64ShiftMaskLength(d),31,32)] x) + for { + c := auxIntToInt64(v.AuxInt) + z := v_0 + if z.Op != OpPPC64AND { + break + } + _ = z.Args[1] + z_0 := z.Args[0] + z_1 := z.Args[1] + for _i0 := 0; _i0 <= 1; _i0, z_0, z_1 = _i0+1, z_1, z_0 { + if z_0.Op != OpPPC64MOVDconst { + continue + } + d := auxIntToInt64(z_0.AuxInt) + x := z_1 + if !(z.Uses == 1 && isPPC64ValidShiftMask(d)) { + continue + } + v.reset(OpPPC64CLRLSLWI) + v.AuxInt = int32ToAuxInt(newPPC64ShiftAuxInt(c, 
32-getPPC64ShiftMaskLength(d), 31, 32)) + v.AddArg(x) + return true + } + break + } + return false +} func rewriteValuePPC64_OpPPC64SRAD(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] diff --git a/src/cmd/internal/obj/ppc64/a.out.go b/src/cmd/internal/obj/ppc64/a.out.go index 8b32692778..f438803fb5 100644 --- a/src/cmd/internal/obj/ppc64/a.out.go +++ b/src/cmd/internal/obj/ppc64/a.out.go @@ -575,6 +575,7 @@ const ( ARLWMICC ARLWNM ARLWNMCC + ACLRLSLWI ASLW ASLWCC ASRW @@ -716,6 +717,9 @@ const ( ARLDCLCC ARLDICL ARLDICLCC + ARLDIC + ARLDICCC + ACLRLSLDI AROTL AROTLW ASLBIA diff --git a/src/cmd/internal/obj/ppc64/anames.go b/src/cmd/internal/obj/ppc64/anames.go index 287011877c..accd87fe00 100644 --- a/src/cmd/internal/obj/ppc64/anames.go +++ b/src/cmd/internal/obj/ppc64/anames.go @@ -180,6 +180,7 @@ var Anames = []string{ "RLWMICC", "RLWNM", "RLWNMCC", + "CLRLSLWI", "SLW", "SLWCC", "SRW", @@ -312,6 +313,9 @@ var Anames = []string{ "RLDCLCC", "RLDICL", "RLDICLCC", + "RLDIC", + "RLDICCC", + "CLRLSLDI", "ROTL", "ROTLW", "SLBIA", diff --git a/src/cmd/internal/obj/ppc64/asm9.go b/src/cmd/internal/obj/ppc64/asm9.go index 98b453de6c..60dda72507 100644 --- a/src/cmd/internal/obj/ppc64/asm9.go +++ b/src/cmd/internal/obj/ppc64/asm9.go @@ -1904,6 +1904,7 @@ func buildop(ctxt *obj.Link) { opset(ARLWMICC, r0) opset(ARLWNM, r0) opset(ARLWNMCC, r0) + opset(ACLRLSLWI, r0) case ARLDMI: opset(ARLDMICC, r0) @@ -1922,6 +1923,9 @@ func buildop(ctxt *obj.Link) { opset(ARLDICLCC, r0) opset(ARLDICR, r0) opset(ARLDICRCC, r0) + opset(ARLDIC, r0) + opset(ARLDICCC, r0) + opset(ACLRLSLDI, r0) case AFMOVD: opset(AFMOVDCC, r0) @@ -2734,13 +2738,31 @@ func (c *ctxt9) asmout(p *obj.Prog, o *Optab, out []uint32) { case ARLDICR, ARLDICRCC: me := int(d) sh := c.regoff(&p.From) + if me < 0 || me > 63 || sh > 63 { + c.ctxt.Diag("Invalid me or sh for RLDICR: %x %x\n%v", int(d), sh) + } o1 = AOP_RLDIC(c.oprrr(p.As), uint32(p.To.Reg), uint32(r), uint32(sh), uint32(me)) - case ARLDICL, ARLDICLCC: + case ARLDICL, ARLDICLCC, ARLDIC, ARLDICCC: mb := int(d) sh := c.regoff(&p.From) + if mb < 0 || mb > 63 || sh > 63 { + c.ctxt.Diag("Invalid mb or sh for RLDIC, RLDICL: %x %x\n%v", mb, sh) + } o1 = AOP_RLDIC(c.oprrr(p.As), uint32(p.To.Reg), uint32(r), uint32(sh), uint32(mb)) + case ACLRLSLDI: + // This is an extended mnemonic defined in the ISA section C.8.1 + // clrlsldi ra,rs,n,b --> rldic ra,rs,n,b-n + // It maps onto RLDIC so is directly generated here based on the operands from + // the clrlsldi. + b := int(d) + n := c.regoff(&p.From) + if n > int32(b) || b > 63 { + c.ctxt.Diag("Invalid n or b for CLRLSLDI: %x %x\n%v", n, b) + } + o1 = AOP_RLDIC(OP_RLDIC, uint32(p.To.Reg), uint32(r), uint32(n), uint32(b)-uint32(n)) + default: c.ctxt.Diag("unexpected op in rldc case\n%v", p) a = 0 @@ -3354,18 +3376,43 @@ func (c *ctxt9) asmout(p *obj.Prog, o *Optab, out []uint32) { case 62: /* rlwmi $sh,s,$mask,a */ v := c.regoff(&p.From) - - var mask [2]uint8 - c.maskgen(p, mask[:], uint32(c.regoff(p.GetFrom3()))) - o1 = AOP_RRR(c.opirr(p.As), uint32(p.Reg), uint32(p.To.Reg), uint32(v)) - o1 |= (uint32(mask[0])&31)<<6 | (uint32(mask[1])&31)<<1 + switch p.As { + case ACLRLSLWI: + b := c.regoff(p.GetFrom3()) + // This is an extended mnemonic described in the ISA C.8.2 + // clrlslwi ra,rs,n,b -> rlwinm ra,rs,n,b-n,31-n + // It maps onto rlwinm which is directly generated here. 
+ if v < 0 || v > 32 || b > 32 { + c.ctxt.Diag("Invalid n or b for CLRLSLWI: %x %x\n%v", v, b) + } + o1 = OP_RLW(OP_RLWINM, uint32(p.To.Reg), uint32(p.Reg), uint32(v), uint32(b-v), uint32(31-v)) + default: + var mask [2]uint8 + c.maskgen(p, mask[:], uint32(c.regoff(p.GetFrom3()))) + o1 = AOP_RRR(c.opirr(p.As), uint32(p.Reg), uint32(p.To.Reg), uint32(v)) + o1 |= (uint32(mask[0])&31)<<6 | (uint32(mask[1])&31)<<1 + } case 63: /* rlwmi b,s,$mask,a */ - var mask [2]uint8 - c.maskgen(p, mask[:], uint32(c.regoff(p.GetFrom3()))) - - o1 = AOP_RRR(c.opirr(p.As), uint32(p.Reg), uint32(p.To.Reg), uint32(p.From.Reg)) - o1 |= (uint32(mask[0])&31)<<6 | (uint32(mask[1])&31)<<1 + v := c.regoff(&p.From) + switch p.As { + case ACLRLSLWI: + b := c.regoff(p.GetFrom3()) + if v > b || b > 32 { + // Message will match operands from the ISA even though in the + // code it uses 'v' + c.ctxt.Diag("Invalid n or b for CLRLSLWI: %x %x\n%v", v, b) + } + // This is an extended mnemonic described in the ISA C.8.2 + // clrlslwi ra,rs,n,b -> rlwinm ra,rs,n,b-n,31-n + // It generates the rlwinm directly here. + o1 = OP_RLW(OP_RLWINM, uint32(p.To.Reg), uint32(p.Reg), uint32(v), uint32(b-v), uint32(31-v)) + default: + var mask [2]uint8 + c.maskgen(p, mask[:], uint32(c.regoff(p.GetFrom3()))) + o1 = AOP_RRR(c.opirr(p.As), uint32(p.Reg), uint32(p.To.Reg), uint32(v)) + o1 |= (uint32(mask[0])&31)<<6 | (uint32(mask[1])&31)<<1 + } case 64: /* mtfsf fr[, $m] {,fpcsr} */ var v int32 @@ -4277,6 +4324,11 @@ func (c *ctxt9) oprrr(a obj.As) uint32 { case ARLDICRCC: return OPVCC(30, 0, 0, 1) | 2<<1 // rldicr. + case ARLDIC: + return OPVCC(30, 0, 0, 0) | 4<<1 // rldic + case ARLDICCC: + return OPVCC(30, 0, 0, 1) | 4<<1 // rldic. + case ASYSCALL: return OPVCC(17, 1, 0, 0) diff --git a/test/codegen/shift.go b/test/codegen/shift.go index 5e50ea6bff..32214851b5 100644 --- a/test/codegen/shift.go +++ b/test/codegen/shift.go @@ -150,6 +150,61 @@ func lshGuarded64(v int64, s uint) int64 { panic("shift too large") } +func checkUnneededTrunc(tab *[100000]uint32, d uint64, v uint32, h uint16, b byte) (uint32, uint64) { + + // ppc64le:-".*RLWINM",-".*RLDICR",".*CLRLSLDI" + // ppc64:-".*RLWINM",-".*RLDICR",".*CLRLSLDI" + f := tab[byte(v)^b] + // ppc64le:-".*RLWINM",-".*RLDICR",".*CLRLSLDI" + // ppc64:-".*RLWINM",-".*RLDICR",".*CLRLSLDI" + f += tab[byte(v)&b] + // ppc64le:-".*RLWINM",-".*RLDICR",".*CLRLSLDI" + // ppc64:-".*RLWINM",-".*RLDICR",".*CLRLSLDI" + f += tab[byte(v)|b] + // ppc64le:-".*RLWINM",-".*RLDICR",".*CLRLSLDI" + // ppc64:-".*RLWINM",-".*RLDICR",".*CLRLSLDI" + f += tab[uint16(v)&h] + // ppc64le:-".*RLWINM",-".*RLDICR",".*CLRLSLDI" + // ppc64:-".*RLWINM",-".*RLDICR",".*CLRLSLDI" + f += tab[uint16(v)^h] + // ppc64le:-".*RLWINM",-".*RLDICR",".*CLRLSLDI" + // ppc64:-".*RLWINM",-".*RLDICR",".*CLRLSLDI" + f += tab[uint16(v)|h] + // ppc64le:-".*AND",-"RLDICR",".*CLRLSLDI" + // ppc64:-".*AND",-"RLDICR",".*CLRLSLDI" + f += tab[v&0xff] + // ppc64le:-".*AND",".*CLRLSLWI" + // ppc64:-".*AND",".*CLRLSLWI" + f += 2*uint32(uint16(d)) + // ppc64le:-".*AND",-"RLDICR",".*CLRLSLDI" + // ppc64:-".*AND",-"RLDICR",".*CLRLSLDI" + g := 2*uint64(uint32(d)) + return f, g +} + +func checkCombinedShifts(v8 uint8, v16 uint16, v32 uint32, v64 uint64) (uint8, uint16, uint32, uint64) { + + // ppc64le:-"AND","CLRLSLWI" + // ppc64:-"AND","CLRLSLWI" + f := (v8 &0xF) << 2 + // ppc64le:-"AND","CLRLSLWI" + // ppc64:-"AND","CLRLSLWI" + f += byte(v16)<<3 + // ppc64le:-"AND","CLRLSLWI" + // ppc64:-"AND","CLRLSLWI" + g := (v16 & 0xFF) << 3 + // ppc64le:-"AND","CLRLSLWI" + // 
ppc64:-"AND","CLRLSLWI" + h := (v32 & 0xFFFFF) << 2 + // ppc64le:-"AND","CLRLSLWI" + // ppc64:-"AND","CLRLSLWI" + h += uint32(v64)<<4 + // ppc64le:-"AND","CLRLSLDI" + // ppc64:-"AND","CLRLSLDI" + i := (v64 & 0xFFFFFFFF) << 5 + return f, g, h, i +} + func checkWidenAfterShift(v int64, u uint64) (int64, uint64) { // ppc64le:-".*MOVW" -- cgit v1.3 From 35e413c5373538ca6e0516f454f78c74477eff2c Mon Sep 17 00:00:00 2001 From: David Chase Date: Mon, 15 Jun 2020 14:20:36 -0400 Subject: cmd/compile: add new ops for experiment with late call expansion Added Dereference, StaticLECall, SelectN, SelectNAddr Change-Id: I5426ae488e83956539aa07f7415b8acc26c933e6 Reviewed-on: https://go-review.googlesource.com/c/go/+/239082 Trust: David Chase Run-TryBot: David Chase Reviewed-by: Cherry Zhang TryBot-Result: Go Bot --- src/cmd/compile/internal/ssa/gen/genericOps.go | 15 +++++++++----- src/cmd/compile/internal/ssa/opGen.go | 28 ++++++++++++++++++++++++++ 2 files changed, 38 insertions(+), 5 deletions(-) (limited to 'src/cmd/compile/internal/ssa') diff --git a/src/cmd/compile/internal/ssa/gen/genericOps.go b/src/cmd/compile/internal/ssa/gen/genericOps.go index 145ba2d50c..95edff4c8c 100644 --- a/src/cmd/compile/internal/ssa/gen/genericOps.go +++ b/src/cmd/compile/internal/ssa/gen/genericOps.go @@ -346,6 +346,7 @@ var genericOps = []opData{ // Memory operations {name: "Load", argLength: 2}, // Load from arg0. arg1=memory + {name: "Dereference", argLength: 2}, // Load from arg0. arg1=memory. Helper op for arg/result passing, result is an otherwise not-SSA-able "value". {name: "Store", argLength: 3, typ: "Mem", aux: "Typ"}, // Store arg1 to arg0. arg2=memory, aux=type. Returns memory. // The source and destination of Move may overlap in some cases. See e.g. // memmove inlining in generic.rules. When inlineablememmovesize (in ../rewrite.go) @@ -387,9 +388,11 @@ var genericOps = []opData{ // as a phantom first argument. // TODO(josharian): ClosureCall and InterCall should have Int32 aux // to match StaticCall's 32 bit arg size limit. - {name: "ClosureCall", argLength: 3, aux: "CallOff", call: true}, // arg0=code pointer, arg1=context ptr, arg2=memory. auxint=arg size. Returns memory. - {name: "StaticCall", argLength: 1, aux: "CallOff", call: true}, // call function aux.(*obj.LSym), arg0=memory. auxint=arg size. Returns memory. - {name: "InterCall", argLength: 2, aux: "CallOff", call: true}, // interface call. arg0=code pointer, arg1=memory, auxint=arg size. Returns memory. + // TODO(drchase,josharian): could the arg size limit be bundled into the rules for CallOff? + {name: "ClosureCall", argLength: 3, aux: "CallOff", call: true}, // arg0=code pointer, arg1=context ptr, arg2=memory. auxint=arg size. Returns memory. + {name: "StaticCall", argLength: 1, aux: "CallOff", call: true}, // call function aux.(*obj.LSym), arg0=memory. auxint=arg size. Returns memory. + {name: "InterCall", argLength: 2, aux: "CallOff", call: true}, // interface call. arg0=code pointer, arg1=memory, auxint=arg size. Returns memory. + {name: "StaticLECall", argLength: -1, aux: "CallOff", call: true}, // late-expanded static call function aux.(*ssa.AuxCall.Fn). arg0..argN-1 are inputs, argN is mem. auxint = arg size. Result is tuple of result(s), plus mem. 
// Conversions: signed extensions, zero (unsigned) extensions, truncations {name: "SignExt8to16", argLength: 1, typ: "Int16"}, @@ -531,8 +534,10 @@ var genericOps = []opData{ {name: "Cvt64Fto64U", argLength: 1}, // float64 -> uint64, only used on archs that has the instruction // pseudo-ops for breaking Tuple - {name: "Select0", argLength: 1, zeroWidth: true}, // the first component of a tuple - {name: "Select1", argLength: 1, zeroWidth: true}, // the second component of a tuple + {name: "Select0", argLength: 1, zeroWidth: true}, // the first component of a tuple + {name: "Select1", argLength: 1, zeroWidth: true}, // the second component of a tuple + {name: "SelectN", argLength: 1, aux: "Int64"}, // arg0=tuple, auxint=field index. Returns the auxint'th member. + {name: "SelectNAddr", argLength: 1, aux: "Int64"}, // arg0=tuple, auxint=field index. Returns the address of auxint'th member. Used for un-SSA-able result types. // Atomic operations used for semantically inlining runtime/internal/atomic. // Atomic loads return a new memory so that the loads are properly ordered diff --git a/src/cmd/compile/internal/ssa/opGen.go b/src/cmd/compile/internal/ssa/opGen.go index f00dc3f7f5..1fc0f7ea79 100644 --- a/src/cmd/compile/internal/ssa/opGen.go +++ b/src/cmd/compile/internal/ssa/opGen.go @@ -2717,6 +2717,7 @@ const ( OpSP OpSB OpLoad + OpDereference OpStore OpMove OpZero @@ -2730,6 +2731,7 @@ const ( OpClosureCall OpStaticCall OpInterCall + OpStaticLECall OpSignExt8to16 OpSignExt8to32 OpSignExt8to64 @@ -2826,6 +2828,8 @@ const ( OpCvt64Fto64U OpSelect0 OpSelect1 + OpSelectN + OpSelectNAddr OpAtomicLoad8 OpAtomicLoad32 OpAtomicLoad64 @@ -34742,6 +34746,11 @@ var opcodeTable = [...]opInfo{ argLen: 2, generic: true, }, + { + name: "Dereference", + argLen: 2, + generic: true, + }, { name: "Store", auxType: auxTyp, @@ -34827,6 +34836,13 @@ var opcodeTable = [...]opInfo{ call: true, generic: true, }, + { + name: "StaticLECall", + auxType: auxCallOff, + argLen: -1, + call: true, + generic: true, + }, { name: "SignExt8to16", argLen: 1, @@ -35328,6 +35344,18 @@ var opcodeTable = [...]opInfo{ zeroWidth: true, generic: true, }, + { + name: "SelectN", + auxType: auxInt64, + argLen: 1, + generic: true, + }, + { + name: "SelectNAddr", + auxType: auxInt64, + argLen: 1, + generic: true, + }, { name: "AtomicLoad8", argLen: 2, -- cgit v1.3 From 0b1cec7ad33a073be15db89da90efcba9797df83 Mon Sep 17 00:00:00 2001 From: root <2863768433@qq.com> Date: Sun, 30 Aug 2020 11:36:45 +0800 Subject: cmd/compile: rotate phase's title 180 degrees in ssa/html.go Modify phase's title according to html.go:122 TODO. Fixes #41098 Change-Id: I58fa365e718600aaaa0a72cce72d35a484cde8b8 Reviewed-on: https://go-review.googlesource.com/c/go/+/251657 Reviewed-by: Bradford Lamson-Scribner Reviewed-by: Keith Randall Trust: Josh Bleecher Snyder --- src/cmd/compile/internal/ssa/html.go | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) (limited to 'src/cmd/compile/internal/ssa') diff --git a/src/cmd/compile/internal/ssa/html.go b/src/cmd/compile/internal/ssa/html.go index ba37a80412..1c70b64708 100644 --- a/src/cmd/compile/internal/ssa/html.go +++ b/src/cmd/compile/internal/ssa/html.go @@ -119,7 +119,8 @@ td.collapsed { } td.collapsed div { - /* TODO: Flip the direction of the phase's title 90 degrees on a collapsed column. 
*/ + text-align: right; + transform: rotate(180deg); writing-mode: vertical-lr; white-space: pre; } -- cgit v1.3 From 234e23d76351c31b191e25b688aa43248d9b3d5b Mon Sep 17 00:00:00 2001 From: root <2863768433@qq.com> Date: Mon, 17 Aug 2020 10:03:06 +0800 Subject: cmd/compile: make expanded/hidden columns in GOSSAFUNC persist across reloads use pushState with updated state and read it on page load,so that state can survive across reloads. Change-Id: I6c5e80e9747576245b979a62cb96d231d8f27d57 Reviewed-on: https://go-review.googlesource.com/c/go/+/248687 Run-TryBot: Alberto Donizetti TryBot-Result: Go Bot Reviewed-by: Bradford Lamson-Scribner Reviewed-by: Keith Randall Trust: Josh Bleecher Snyder --- src/cmd/compile/internal/ssa/html.go | 48 ++++++++++++++++++++++++------------ 1 file changed, 32 insertions(+), 16 deletions(-) (limited to 'src/cmd/compile/internal/ssa') diff --git a/src/cmd/compile/internal/ssa/html.go b/src/cmd/compile/internal/ssa/html.go index 1c70b64708..c781ca92cc 100644 --- a/src/cmd/compile/internal/ssa/html.go +++ b/src/cmd/compile/internal/ssa/html.go @@ -358,6 +358,21 @@ body.darkmode ellipse.outline-black { outline: gray solid 2px; }