Diffstat (limited to 'src/cmd/compile/internal/ssa/rewriteAMD64.go')
-rw-r--r-- | src/cmd/compile/internal/ssa/rewriteAMD64.go | 15394
1 file changed, 15394 insertions, 0 deletions
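Each function in the diff below is generated from a rewrite rule in gen/AMD64.rules: a rule gives a match pattern, an optional boolean condition, and a result, and the generator expands it into the match/cond/result comments and matcher code seen here. As a rough sketch of the rules-file syntax (the authoritative rules live in gen/AMD64.rules), the Add32, ADDB, and ADDQ matchers below correspond to rules of this shape:

    (Add32 x y) -> (ADDL x y)
    (ADDB x (MOVBconst [c])) -> (ADDBconst [c] x)
    (ADDQ x (MOVQconst [c])) && is32Bit(c) -> (ADDQconst [c] x)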
diff --git a/src/cmd/compile/internal/ssa/rewriteAMD64.go b/src/cmd/compile/internal/ssa/rewriteAMD64.go
new file mode 100644
index 0000000000..83fc437747
--- /dev/null
+++ b/src/cmd/compile/internal/ssa/rewriteAMD64.go
@@ -0,0 +1,15394 @@
+// autogenerated from gen/AMD64.rules: do not edit!
+// generated with: cd gen; go run *.go
+
+package ssa
+
+import "math"
+
+var _ = math.MinInt8 // in case not otherwise used
+func rewriteValueAMD64(v *Value, config *Config) bool {
+	switch v.Op {
+	case OpAMD64ADDB:
+		return rewriteValueAMD64_OpAMD64ADDB(v, config)
+	case OpAMD64ADDBconst:
+		return rewriteValueAMD64_OpAMD64ADDBconst(v, config)
+	case OpAMD64ADDL:
+		return rewriteValueAMD64_OpAMD64ADDL(v, config)
+	case OpAMD64ADDLconst:
+		return rewriteValueAMD64_OpAMD64ADDLconst(v, config)
+	case OpAMD64ADDQ:
+		return rewriteValueAMD64_OpAMD64ADDQ(v, config)
+	case OpAMD64ADDQconst:
+		return rewriteValueAMD64_OpAMD64ADDQconst(v, config)
+	case OpAMD64ADDW:
+		return rewriteValueAMD64_OpAMD64ADDW(v, config)
+	case OpAMD64ADDWconst:
+		return rewriteValueAMD64_OpAMD64ADDWconst(v, config)
+	case OpAMD64ANDB:
+		return rewriteValueAMD64_OpAMD64ANDB(v, config)
+	case OpAMD64ANDBconst:
+		return rewriteValueAMD64_OpAMD64ANDBconst(v, config)
+	case OpAMD64ANDL:
+		return rewriteValueAMD64_OpAMD64ANDL(v, config)
+	case OpAMD64ANDLconst:
+		return rewriteValueAMD64_OpAMD64ANDLconst(v, config)
+	case OpAMD64ANDQ:
+		return rewriteValueAMD64_OpAMD64ANDQ(v, config)
+	case OpAMD64ANDQconst:
+		return rewriteValueAMD64_OpAMD64ANDQconst(v, config)
+	case OpAMD64ANDW:
+		return rewriteValueAMD64_OpAMD64ANDW(v, config)
+	case OpAMD64ANDWconst:
+		return rewriteValueAMD64_OpAMD64ANDWconst(v, config)
+	case OpAdd16:
+		return rewriteValueAMD64_OpAdd16(v, config)
+	case OpAdd32:
+		return rewriteValueAMD64_OpAdd32(v, config)
+	case OpAdd32F:
+		return rewriteValueAMD64_OpAdd32F(v, config)
+	case OpAdd64:
+		return rewriteValueAMD64_OpAdd64(v, config)
+	case OpAdd64F:
+		return rewriteValueAMD64_OpAdd64F(v, config)
+	case OpAdd8:
+		return rewriteValueAMD64_OpAdd8(v, config)
+	case OpAddPtr:
+		return rewriteValueAMD64_OpAddPtr(v, config)
+	case OpAddr:
+		return rewriteValueAMD64_OpAddr(v, config)
+	case OpAnd16:
+		return rewriteValueAMD64_OpAnd16(v, config)
+	case OpAnd32:
+		return rewriteValueAMD64_OpAnd32(v, config)
+	case OpAnd64:
+		return rewriteValueAMD64_OpAnd64(v, config)
+	case OpAnd8:
+		return rewriteValueAMD64_OpAnd8(v, config)
+	case OpAvg64u:
+		return rewriteValueAMD64_OpAvg64u(v, config)
+	case OpAMD64CMPB:
+		return rewriteValueAMD64_OpAMD64CMPB(v, config)
+	case OpAMD64CMPBconst:
+		return rewriteValueAMD64_OpAMD64CMPBconst(v, config)
+	case OpAMD64CMPL:
+		return rewriteValueAMD64_OpAMD64CMPL(v, config)
+	case OpAMD64CMPLconst:
+		return rewriteValueAMD64_OpAMD64CMPLconst(v, config)
+	case OpAMD64CMPQ:
+		return rewriteValueAMD64_OpAMD64CMPQ(v, config)
+	case OpAMD64CMPQconst:
+		return rewriteValueAMD64_OpAMD64CMPQconst(v, config)
+	case OpAMD64CMPW:
+		return rewriteValueAMD64_OpAMD64CMPW(v, config)
+	case OpAMD64CMPWconst:
+		return rewriteValueAMD64_OpAMD64CMPWconst(v, config)
+	case OpClosureCall:
+		return rewriteValueAMD64_OpClosureCall(v, config)
+	case OpCom16:
+		return rewriteValueAMD64_OpCom16(v, config)
+	case OpCom32:
+		return rewriteValueAMD64_OpCom32(v, config)
+	case OpCom64:
+		return rewriteValueAMD64_OpCom64(v, config)
+	case OpCom8:
+		return rewriteValueAMD64_OpCom8(v, config)
+	case OpConst16:
+		return rewriteValueAMD64_OpConst16(v, config)
+	case OpConst32:
+		return rewriteValueAMD64_OpConst32(v, config)
+	case OpConst32F:
+		return rewriteValueAMD64_OpConst32F(v, config)
+	case OpConst64:
+		return rewriteValueAMD64_OpConst64(v, config)
+	case OpConst64F:
+		return rewriteValueAMD64_OpConst64F(v, config)
+	case OpConst8:
+		return rewriteValueAMD64_OpConst8(v, config)
+	case OpConstBool:
+		return rewriteValueAMD64_OpConstBool(v, config)
+	case OpConstNil:
+		return rewriteValueAMD64_OpConstNil(v, config)
+	case OpConvert:
+		return rewriteValueAMD64_OpConvert(v, config)
+	case OpCvt32Fto32:
+		return rewriteValueAMD64_OpCvt32Fto32(v, config)
+	case OpCvt32Fto64:
+		return rewriteValueAMD64_OpCvt32Fto64(v, config)
+	case OpCvt32Fto64F:
+		return rewriteValueAMD64_OpCvt32Fto64F(v, config)
+	case OpCvt32to32F:
+		return rewriteValueAMD64_OpCvt32to32F(v, config)
+	case OpCvt32to64F:
+		return rewriteValueAMD64_OpCvt32to64F(v, config)
+	case OpCvt64Fto32:
+		return rewriteValueAMD64_OpCvt64Fto32(v, config)
+	case OpCvt64Fto32F:
+		return rewriteValueAMD64_OpCvt64Fto32F(v, config)
+	case OpCvt64Fto64:
+		return rewriteValueAMD64_OpCvt64Fto64(v, config)
+	case OpCvt64to32F:
+		return rewriteValueAMD64_OpCvt64to32F(v, config)
+	case OpCvt64to64F:
+		return rewriteValueAMD64_OpCvt64to64F(v, config)
+	case OpDeferCall:
+		return rewriteValueAMD64_OpDeferCall(v, config)
+	case OpDiv16:
+		return rewriteValueAMD64_OpDiv16(v, config)
+	case OpDiv16u:
+		return rewriteValueAMD64_OpDiv16u(v, config)
+	case OpDiv32:
+		return rewriteValueAMD64_OpDiv32(v, config)
+	case OpDiv32F:
+		return rewriteValueAMD64_OpDiv32F(v, config)
+	case OpDiv32u:
+		return rewriteValueAMD64_OpDiv32u(v, config)
+	case OpDiv64:
+		return rewriteValueAMD64_OpDiv64(v, config)
+	case OpDiv64F:
+		return rewriteValueAMD64_OpDiv64F(v, config)
+	case OpDiv64u:
+		return rewriteValueAMD64_OpDiv64u(v, config)
+	case OpDiv8:
+		return rewriteValueAMD64_OpDiv8(v, config)
+	case OpDiv8u:
+		return rewriteValueAMD64_OpDiv8u(v, config)
+	case OpEq16:
+		return rewriteValueAMD64_OpEq16(v, config)
+	case OpEq32:
+		return rewriteValueAMD64_OpEq32(v, config)
+	case OpEq32F:
+		return rewriteValueAMD64_OpEq32F(v, config)
+	case OpEq64:
+		return rewriteValueAMD64_OpEq64(v, config)
+	case OpEq64F:
+		return rewriteValueAMD64_OpEq64F(v, config)
+	case OpEq8:
+		return rewriteValueAMD64_OpEq8(v, config)
+	case OpEqPtr:
+		return rewriteValueAMD64_OpEqPtr(v, config)
+	case OpGeq16:
+		return rewriteValueAMD64_OpGeq16(v, config)
+	case OpGeq16U:
+		return rewriteValueAMD64_OpGeq16U(v, config)
+	case OpGeq32:
+		return rewriteValueAMD64_OpGeq32(v, config)
+	case OpGeq32F:
+		return rewriteValueAMD64_OpGeq32F(v, config)
+	case OpGeq32U:
+		return rewriteValueAMD64_OpGeq32U(v, config)
+	case OpGeq64:
+		return rewriteValueAMD64_OpGeq64(v, config)
+	case OpGeq64F:
+		return rewriteValueAMD64_OpGeq64F(v, config)
+	case OpGeq64U:
+		return rewriteValueAMD64_OpGeq64U(v, config)
+	case OpGeq8:
+		return rewriteValueAMD64_OpGeq8(v, config)
+	case OpGeq8U:
+		return rewriteValueAMD64_OpGeq8U(v, config)
+	case OpGetClosurePtr:
+		return rewriteValueAMD64_OpGetClosurePtr(v, config)
+	case OpGetG:
+		return rewriteValueAMD64_OpGetG(v, config)
+	case OpGoCall:
+		return rewriteValueAMD64_OpGoCall(v, config)
+	case OpGreater16:
+		return rewriteValueAMD64_OpGreater16(v, config)
+	case OpGreater16U:
+		return rewriteValueAMD64_OpGreater16U(v, config)
+	case OpGreater32:
+		return rewriteValueAMD64_OpGreater32(v, config)
+	case OpGreater32F:
+		return rewriteValueAMD64_OpGreater32F(v, config)
+	case OpGreater32U:
+		return rewriteValueAMD64_OpGreater32U(v, config)
+	case OpGreater64:
+		return rewriteValueAMD64_OpGreater64(v, config)
+	case OpGreater64F:
+		return rewriteValueAMD64_OpGreater64F(v, config)
+	case OpGreater64U:
+		return rewriteValueAMD64_OpGreater64U(v, config)
+	case OpGreater8:
+		return rewriteValueAMD64_OpGreater8(v, config)
+	case OpGreater8U:
+		return rewriteValueAMD64_OpGreater8U(v, config)
+	case OpHmul16:
+		return rewriteValueAMD64_OpHmul16(v, config)
+	case OpHmul16u:
+		return rewriteValueAMD64_OpHmul16u(v, config)
+	case OpHmul32:
+		return rewriteValueAMD64_OpHmul32(v, config)
+	case OpHmul32u:
+		return rewriteValueAMD64_OpHmul32u(v, config)
+	case OpHmul64:
+		return rewriteValueAMD64_OpHmul64(v, config)
+	case OpHmul64u:
+		return rewriteValueAMD64_OpHmul64u(v, config)
+	case OpHmul8:
+		return rewriteValueAMD64_OpHmul8(v, config)
+	case OpHmul8u:
+		return rewriteValueAMD64_OpHmul8u(v, config)
+	case OpITab:
+		return rewriteValueAMD64_OpITab(v, config)
+	case OpInterCall:
+		return rewriteValueAMD64_OpInterCall(v, config)
+	case OpIsInBounds:
+		return rewriteValueAMD64_OpIsInBounds(v, config)
+	case OpIsNonNil:
+		return rewriteValueAMD64_OpIsNonNil(v, config)
+	case OpIsSliceInBounds:
+		return rewriteValueAMD64_OpIsSliceInBounds(v, config)
+	case OpAMD64LEAQ:
+		return rewriteValueAMD64_OpAMD64LEAQ(v, config)
+	case OpAMD64LEAQ1:
+		return rewriteValueAMD64_OpAMD64LEAQ1(v, config)
+	case OpAMD64LEAQ2:
+		return rewriteValueAMD64_OpAMD64LEAQ2(v, config)
+	case OpAMD64LEAQ4:
+		return rewriteValueAMD64_OpAMD64LEAQ4(v, config)
+	case OpAMD64LEAQ8:
+		return rewriteValueAMD64_OpAMD64LEAQ8(v, config)
+	case OpLeq16:
+		return rewriteValueAMD64_OpLeq16(v, config)
+	case OpLeq16U:
+		return rewriteValueAMD64_OpLeq16U(v, config)
+	case OpLeq32:
+		return rewriteValueAMD64_OpLeq32(v, config)
+	case OpLeq32F:
+		return rewriteValueAMD64_OpLeq32F(v, config)
+	case OpLeq32U:
+		return rewriteValueAMD64_OpLeq32U(v, config)
+	case OpLeq64:
+		return rewriteValueAMD64_OpLeq64(v, config)
+	case OpLeq64F:
+		return rewriteValueAMD64_OpLeq64F(v, config)
+	case OpLeq64U:
+		return rewriteValueAMD64_OpLeq64U(v, config)
+	case OpLeq8:
+		return rewriteValueAMD64_OpLeq8(v, config)
+	case OpLeq8U:
+		return rewriteValueAMD64_OpLeq8U(v, config)
+	case OpLess16:
+		return rewriteValueAMD64_OpLess16(v, config)
+	case OpLess16U:
+		return rewriteValueAMD64_OpLess16U(v, config)
+	case OpLess32:
+		return rewriteValueAMD64_OpLess32(v, config)
+	case OpLess32F:
+		return rewriteValueAMD64_OpLess32F(v, config)
+	case OpLess32U:
+		return rewriteValueAMD64_OpLess32U(v, config)
+	case OpLess64:
+		return rewriteValueAMD64_OpLess64(v, config)
+	case OpLess64F:
+		return rewriteValueAMD64_OpLess64F(v, config)
+	case OpLess64U:
+		return rewriteValueAMD64_OpLess64U(v, config)
+	case OpLess8:
+		return rewriteValueAMD64_OpLess8(v, config)
+	case OpLess8U:
+		return rewriteValueAMD64_OpLess8U(v, config)
+	case OpLoad:
+		return rewriteValueAMD64_OpLoad(v, config)
+	case OpLrot16:
+		return rewriteValueAMD64_OpLrot16(v, config)
+	case OpLrot32:
+		return rewriteValueAMD64_OpLrot32(v, config)
+	case OpLrot64:
+		return rewriteValueAMD64_OpLrot64(v, config)
+	case OpLrot8:
+		return rewriteValueAMD64_OpLrot8(v, config)
+	case OpLsh16x16:
+		return rewriteValueAMD64_OpLsh16x16(v, config)
+	case OpLsh16x32:
+		return rewriteValueAMD64_OpLsh16x32(v, config)
+	case OpLsh16x64:
+		return rewriteValueAMD64_OpLsh16x64(v, config)
+	case OpLsh16x8:
+		return rewriteValueAMD64_OpLsh16x8(v, config)
+	case OpLsh32x16:
+		return rewriteValueAMD64_OpLsh32x16(v, config)
+	case OpLsh32x32:
+		return rewriteValueAMD64_OpLsh32x32(v, config)
+	case OpLsh32x64:
+		return rewriteValueAMD64_OpLsh32x64(v, config)
+	case OpLsh32x8:
+		return rewriteValueAMD64_OpLsh32x8(v, config)
+	case OpLsh64x16:
+		return rewriteValueAMD64_OpLsh64x16(v, config)
+	case OpLsh64x32:
+		return rewriteValueAMD64_OpLsh64x32(v, config)
+	case OpLsh64x64:
+		return rewriteValueAMD64_OpLsh64x64(v, config)
+	case OpLsh64x8:
+		return rewriteValueAMD64_OpLsh64x8(v, config)
+	case OpLsh8x16:
+		return rewriteValueAMD64_OpLsh8x16(v, config)
+	case OpLsh8x32:
+		return rewriteValueAMD64_OpLsh8x32(v, config)
+	case OpLsh8x64:
+		return rewriteValueAMD64_OpLsh8x64(v, config)
+	case OpLsh8x8:
+		return rewriteValueAMD64_OpLsh8x8(v, config)
+	case OpAMD64MOVBQSX:
+		return rewriteValueAMD64_OpAMD64MOVBQSX(v, config)
+	case OpAMD64MOVBQZX:
+		return rewriteValueAMD64_OpAMD64MOVBQZX(v, config)
+	case OpAMD64MOVBload:
+		return rewriteValueAMD64_OpAMD64MOVBload(v, config)
+	case OpAMD64MOVBloadidx1:
+		return rewriteValueAMD64_OpAMD64MOVBloadidx1(v, config)
+	case OpAMD64MOVBstore:
+		return rewriteValueAMD64_OpAMD64MOVBstore(v, config)
+	case OpAMD64MOVBstoreconst:
+		return rewriteValueAMD64_OpAMD64MOVBstoreconst(v, config)
+	case OpAMD64MOVBstoreconstidx1:
+		return rewriteValueAMD64_OpAMD64MOVBstoreconstidx1(v, config)
+	case OpAMD64MOVBstoreidx1:
+		return rewriteValueAMD64_OpAMD64MOVBstoreidx1(v, config)
+	case OpAMD64MOVLQSX:
+		return rewriteValueAMD64_OpAMD64MOVLQSX(v, config)
+	case OpAMD64MOVLQZX:
+		return rewriteValueAMD64_OpAMD64MOVLQZX(v, config)
+	case OpAMD64MOVLload:
+		return rewriteValueAMD64_OpAMD64MOVLload(v, config)
+	case OpAMD64MOVLloadidx4:
+		return rewriteValueAMD64_OpAMD64MOVLloadidx4(v, config)
+	case OpAMD64MOVLstore:
+		return rewriteValueAMD64_OpAMD64MOVLstore(v, config)
+	case OpAMD64MOVLstoreconst:
+		return rewriteValueAMD64_OpAMD64MOVLstoreconst(v, config)
+	case OpAMD64MOVLstoreconstidx4:
+		return rewriteValueAMD64_OpAMD64MOVLstoreconstidx4(v, config)
+	case OpAMD64MOVLstoreidx4:
+		return rewriteValueAMD64_OpAMD64MOVLstoreidx4(v, config)
+	case OpAMD64MOVOload:
+		return rewriteValueAMD64_OpAMD64MOVOload(v, config)
+	case OpAMD64MOVOstore:
+		return rewriteValueAMD64_OpAMD64MOVOstore(v, config)
+	case OpAMD64MOVQload:
+		return rewriteValueAMD64_OpAMD64MOVQload(v, config)
+	case OpAMD64MOVQloadidx8:
+		return rewriteValueAMD64_OpAMD64MOVQloadidx8(v, config)
+	case OpAMD64MOVQstore:
+		return rewriteValueAMD64_OpAMD64MOVQstore(v, config)
+	case OpAMD64MOVQstoreconst:
+		return rewriteValueAMD64_OpAMD64MOVQstoreconst(v, config)
+	case OpAMD64MOVQstoreconstidx8:
+		return rewriteValueAMD64_OpAMD64MOVQstoreconstidx8(v, config)
+	case OpAMD64MOVQstoreidx8:
+		return rewriteValueAMD64_OpAMD64MOVQstoreidx8(v, config)
+	case OpAMD64MOVSDload:
+		return rewriteValueAMD64_OpAMD64MOVSDload(v, config)
+	case OpAMD64MOVSDloadidx8:
+		return rewriteValueAMD64_OpAMD64MOVSDloadidx8(v, config)
+	case OpAMD64MOVSDstore:
+		return rewriteValueAMD64_OpAMD64MOVSDstore(v, config)
+	case OpAMD64MOVSDstoreidx8:
+		return rewriteValueAMD64_OpAMD64MOVSDstoreidx8(v, config)
+	case OpAMD64MOVSSload:
+		return rewriteValueAMD64_OpAMD64MOVSSload(v, config)
+	case OpAMD64MOVSSloadidx4:
+		return rewriteValueAMD64_OpAMD64MOVSSloadidx4(v, config)
+	case OpAMD64MOVSSstore:
+		return rewriteValueAMD64_OpAMD64MOVSSstore(v, config)
+	case OpAMD64MOVSSstoreidx4:
+		return rewriteValueAMD64_OpAMD64MOVSSstoreidx4(v, config)
+	case OpAMD64MOVWQSX:
+		return rewriteValueAMD64_OpAMD64MOVWQSX(v, config)
+	case OpAMD64MOVWQZX:
+		return rewriteValueAMD64_OpAMD64MOVWQZX(v, config)
+	case OpAMD64MOVWload:
+		return rewriteValueAMD64_OpAMD64MOVWload(v, config)
+	case OpAMD64MOVWloadidx2:
+		return rewriteValueAMD64_OpAMD64MOVWloadidx2(v, config)
+	case OpAMD64MOVWstore:
+		return rewriteValueAMD64_OpAMD64MOVWstore(v, config)
+	case OpAMD64MOVWstoreconst:
+		return rewriteValueAMD64_OpAMD64MOVWstoreconst(v, config)
+	case OpAMD64MOVWstoreconstidx2:
+		return rewriteValueAMD64_OpAMD64MOVWstoreconstidx2(v, config)
+	case OpAMD64MOVWstoreidx2:
+		return rewriteValueAMD64_OpAMD64MOVWstoreidx2(v, config)
+	case OpAMD64MULB:
+		return rewriteValueAMD64_OpAMD64MULB(v, config)
+	case OpAMD64MULBconst:
+		return rewriteValueAMD64_OpAMD64MULBconst(v, config)
+	case OpAMD64MULL:
+		return rewriteValueAMD64_OpAMD64MULL(v, config)
+	case OpAMD64MULLconst:
+		return rewriteValueAMD64_OpAMD64MULLconst(v, config)
+	case OpAMD64MULQ:
+		return rewriteValueAMD64_OpAMD64MULQ(v, config)
+	case OpAMD64MULQconst:
+		return rewriteValueAMD64_OpAMD64MULQconst(v, config)
+	case OpAMD64MULW:
+		return rewriteValueAMD64_OpAMD64MULW(v, config)
+	case OpAMD64MULWconst:
+		return rewriteValueAMD64_OpAMD64MULWconst(v, config)
+	case OpMod16:
+		return rewriteValueAMD64_OpMod16(v, config)
+	case OpMod16u:
+		return rewriteValueAMD64_OpMod16u(v, config)
+	case OpMod32:
+		return rewriteValueAMD64_OpMod32(v, config)
+	case OpMod32u:
+		return rewriteValueAMD64_OpMod32u(v, config)
+	case OpMod64:
+		return rewriteValueAMD64_OpMod64(v, config)
+	case OpMod64u:
+		return rewriteValueAMD64_OpMod64u(v, config)
+	case OpMod8:
+		return rewriteValueAMD64_OpMod8(v, config)
+	case OpMod8u:
+		return rewriteValueAMD64_OpMod8u(v, config)
+	case OpMove:
+		return rewriteValueAMD64_OpMove(v, config)
+	case OpMul16:
+		return rewriteValueAMD64_OpMul16(v, config)
+	case OpMul32:
+		return rewriteValueAMD64_OpMul32(v, config)
+	case OpMul32F:
+		return rewriteValueAMD64_OpMul32F(v, config)
+	case OpMul64:
+		return rewriteValueAMD64_OpMul64(v, config)
+	case OpMul64F:
+		return rewriteValueAMD64_OpMul64F(v, config)
+	case OpMul8:
+		return rewriteValueAMD64_OpMul8(v, config)
+	case OpAMD64NEGB:
+		return rewriteValueAMD64_OpAMD64NEGB(v, config)
+	case OpAMD64NEGL:
+		return rewriteValueAMD64_OpAMD64NEGL(v, config)
+	case OpAMD64NEGQ:
+		return rewriteValueAMD64_OpAMD64NEGQ(v, config)
+	case OpAMD64NEGW:
+		return rewriteValueAMD64_OpAMD64NEGW(v, config)
+	case OpAMD64NOTB:
+		return rewriteValueAMD64_OpAMD64NOTB(v, config)
+	case OpAMD64NOTL:
+		return rewriteValueAMD64_OpAMD64NOTL(v, config)
+	case OpAMD64NOTQ:
+		return rewriteValueAMD64_OpAMD64NOTQ(v, config)
+	case OpAMD64NOTW:
+		return rewriteValueAMD64_OpAMD64NOTW(v, config)
+	case OpNeg16:
+		return rewriteValueAMD64_OpNeg16(v, config)
+	case OpNeg32:
+		return rewriteValueAMD64_OpNeg32(v, config)
+	case OpNeg32F:
+		return rewriteValueAMD64_OpNeg32F(v, config)
+	case OpNeg64:
+		return rewriteValueAMD64_OpNeg64(v, config)
+	case OpNeg64F:
+		return rewriteValueAMD64_OpNeg64F(v, config)
+	case OpNeg8:
+		return rewriteValueAMD64_OpNeg8(v, config)
+	case OpNeq16:
+		return rewriteValueAMD64_OpNeq16(v, config)
+	case OpNeq32:
+		return rewriteValueAMD64_OpNeq32(v, config)
+	case OpNeq32F:
+		return rewriteValueAMD64_OpNeq32F(v, config)
+	case OpNeq64:
+		return rewriteValueAMD64_OpNeq64(v, config)
+	case OpNeq64F:
+		return rewriteValueAMD64_OpNeq64F(v, config)
+	case OpNeq8:
+		return rewriteValueAMD64_OpNeq8(v, config)
+	case OpNeqPtr:
+		return rewriteValueAMD64_OpNeqPtr(v, config)
+	case OpNilCheck:
+		return rewriteValueAMD64_OpNilCheck(v, config)
+	case OpNot:
+		return rewriteValueAMD64_OpNot(v, config)
+	case OpAMD64ORB:
+		return rewriteValueAMD64_OpAMD64ORB(v, config)
+	case OpAMD64ORBconst:
+		return rewriteValueAMD64_OpAMD64ORBconst(v, config)
+	case OpAMD64ORL:
+		return rewriteValueAMD64_OpAMD64ORL(v, config)
+	case OpAMD64ORLconst:
+		return rewriteValueAMD64_OpAMD64ORLconst(v, config)
+	case OpAMD64ORQ:
+		return rewriteValueAMD64_OpAMD64ORQ(v, config)
+	case OpAMD64ORQconst:
+		return rewriteValueAMD64_OpAMD64ORQconst(v, config)
+	case OpAMD64ORW:
+		return rewriteValueAMD64_OpAMD64ORW(v, config)
+	case OpAMD64ORWconst:
+		return rewriteValueAMD64_OpAMD64ORWconst(v, config)
+	case OpOffPtr:
+		return rewriteValueAMD64_OpOffPtr(v, config)
+	case OpOr16:
+		return rewriteValueAMD64_OpOr16(v, config)
+	case OpOr32:
+		return rewriteValueAMD64_OpOr32(v, config)
+	case OpOr64:
+		return rewriteValueAMD64_OpOr64(v, config)
+	case OpOr8:
+		return rewriteValueAMD64_OpOr8(v, config)
+	case OpRsh16Ux16:
+		return rewriteValueAMD64_OpRsh16Ux16(v, config)
+	case OpRsh16Ux32:
+		return rewriteValueAMD64_OpRsh16Ux32(v, config)
+	case OpRsh16Ux64:
+		return rewriteValueAMD64_OpRsh16Ux64(v, config)
+	case OpRsh16Ux8:
+		return rewriteValueAMD64_OpRsh16Ux8(v, config)
+	case OpRsh16x16:
+		return rewriteValueAMD64_OpRsh16x16(v, config)
+	case OpRsh16x32:
+		return rewriteValueAMD64_OpRsh16x32(v, config)
+	case OpRsh16x64:
+		return rewriteValueAMD64_OpRsh16x64(v, config)
+	case OpRsh16x8:
+		return rewriteValueAMD64_OpRsh16x8(v, config)
+	case OpRsh32Ux16:
+		return rewriteValueAMD64_OpRsh32Ux16(v, config)
+	case OpRsh32Ux32:
+		return rewriteValueAMD64_OpRsh32Ux32(v, config)
+	case OpRsh32Ux64:
+		return rewriteValueAMD64_OpRsh32Ux64(v, config)
+	case OpRsh32Ux8:
+		return rewriteValueAMD64_OpRsh32Ux8(v, config)
+	case OpRsh32x16:
+		return rewriteValueAMD64_OpRsh32x16(v, config)
+	case OpRsh32x32:
+		return rewriteValueAMD64_OpRsh32x32(v, config)
+	case OpRsh32x64:
+		return rewriteValueAMD64_OpRsh32x64(v, config)
+	case OpRsh32x8:
+		return rewriteValueAMD64_OpRsh32x8(v, config)
+	case OpRsh64Ux16:
+		return rewriteValueAMD64_OpRsh64Ux16(v, config)
+	case OpRsh64Ux32:
+		return rewriteValueAMD64_OpRsh64Ux32(v, config)
+	case OpRsh64Ux64:
+		return rewriteValueAMD64_OpRsh64Ux64(v, config)
+	case OpRsh64Ux8:
+		return rewriteValueAMD64_OpRsh64Ux8(v, config)
+	case OpRsh64x16:
+		return rewriteValueAMD64_OpRsh64x16(v, config)
+	case OpRsh64x32:
+		return rewriteValueAMD64_OpRsh64x32(v, config)
+	case OpRsh64x64:
+		return rewriteValueAMD64_OpRsh64x64(v, config)
+	case OpRsh64x8:
+		return rewriteValueAMD64_OpRsh64x8(v, config)
+	case OpRsh8Ux16:
+		return rewriteValueAMD64_OpRsh8Ux16(v, config)
+	case OpRsh8Ux32:
+		return rewriteValueAMD64_OpRsh8Ux32(v, config)
+	case OpRsh8Ux64:
+		return rewriteValueAMD64_OpRsh8Ux64(v, config)
+	case OpRsh8Ux8:
+		return rewriteValueAMD64_OpRsh8Ux8(v, config)
+	case OpRsh8x16:
+		return rewriteValueAMD64_OpRsh8x16(v, config)
+	case OpRsh8x32:
+		return rewriteValueAMD64_OpRsh8x32(v, config)
+	case OpRsh8x64:
+		return rewriteValueAMD64_OpRsh8x64(v, config)
+	case OpRsh8x8:
+		return rewriteValueAMD64_OpRsh8x8(v, config)
+	case OpAMD64SARB:
+		return rewriteValueAMD64_OpAMD64SARB(v, config)
+	case OpAMD64SARBconst:
+		return rewriteValueAMD64_OpAMD64SARBconst(v, config)
+	case OpAMD64SARL:
+		return rewriteValueAMD64_OpAMD64SARL(v, config)
+	case OpAMD64SARLconst:
+		return rewriteValueAMD64_OpAMD64SARLconst(v, config)
+	case OpAMD64SARQ:
+		return rewriteValueAMD64_OpAMD64SARQ(v, config)
+	case OpAMD64SARQconst:
+		return rewriteValueAMD64_OpAMD64SARQconst(v, config)
+	case OpAMD64SARW:
+		return rewriteValueAMD64_OpAMD64SARW(v, config)
+	case OpAMD64SARWconst:
+		return rewriteValueAMD64_OpAMD64SARWconst(v, config)
+	case OpAMD64SBBLcarrymask:
+		return rewriteValueAMD64_OpAMD64SBBLcarrymask(v, config)
+	case OpAMD64SBBQcarrymask:
+		return rewriteValueAMD64_OpAMD64SBBQcarrymask(v, config)
+	case OpAMD64SETA:
+		return rewriteValueAMD64_OpAMD64SETA(v, config)
+	case OpAMD64SETAE:
+		return rewriteValueAMD64_OpAMD64SETAE(v, config)
+	case OpAMD64SETB:
+		return rewriteValueAMD64_OpAMD64SETB(v, config)
+	case OpAMD64SETBE:
+		return rewriteValueAMD64_OpAMD64SETBE(v, config)
+	case OpAMD64SETEQ:
+		return rewriteValueAMD64_OpAMD64SETEQ(v, config)
+	case OpAMD64SETG:
+		return rewriteValueAMD64_OpAMD64SETG(v, config)
+	case OpAMD64SETGE:
+		return rewriteValueAMD64_OpAMD64SETGE(v, config)
+	case OpAMD64SETL:
+		return rewriteValueAMD64_OpAMD64SETL(v, config)
+	case OpAMD64SETLE:
+		return rewriteValueAMD64_OpAMD64SETLE(v, config)
+	case OpAMD64SETNE:
+		return rewriteValueAMD64_OpAMD64SETNE(v, config)
+	case OpAMD64SHLB:
+		return rewriteValueAMD64_OpAMD64SHLB(v, config)
+	case OpAMD64SHLL:
+		return rewriteValueAMD64_OpAMD64SHLL(v, config)
+	case OpAMD64SHLQ:
+		return rewriteValueAMD64_OpAMD64SHLQ(v, config)
+	case OpAMD64SHLW:
+		return rewriteValueAMD64_OpAMD64SHLW(v, config)
+	case OpAMD64SHRB:
+		return rewriteValueAMD64_OpAMD64SHRB(v, config)
+	case OpAMD64SHRL:
+		return rewriteValueAMD64_OpAMD64SHRL(v, config)
+	case OpAMD64SHRQ:
+		return rewriteValueAMD64_OpAMD64SHRQ(v, config)
+	case OpAMD64SHRW:
+		return rewriteValueAMD64_OpAMD64SHRW(v, config)
+	case OpAMD64SUBB:
+		return rewriteValueAMD64_OpAMD64SUBB(v, config)
+	case OpAMD64SUBBconst:
+		return rewriteValueAMD64_OpAMD64SUBBconst(v, config)
+	case OpAMD64SUBL:
+		return rewriteValueAMD64_OpAMD64SUBL(v, config)
+	case OpAMD64SUBLconst:
+		return rewriteValueAMD64_OpAMD64SUBLconst(v, config)
+	case OpAMD64SUBQ:
+		return rewriteValueAMD64_OpAMD64SUBQ(v, config)
+	case OpAMD64SUBQconst:
+		return rewriteValueAMD64_OpAMD64SUBQconst(v, config)
+	case OpAMD64SUBW:
+		return rewriteValueAMD64_OpAMD64SUBW(v, config)
+	case OpAMD64SUBWconst:
+		return rewriteValueAMD64_OpAMD64SUBWconst(v, config)
+	case OpSignExt16to32:
+		return rewriteValueAMD64_OpSignExt16to32(v, config)
+	case OpSignExt16to64:
+		return rewriteValueAMD64_OpSignExt16to64(v, config)
+	case OpSignExt32to64:
+		return rewriteValueAMD64_OpSignExt32to64(v, config)
+	case OpSignExt8to16:
+		return rewriteValueAMD64_OpSignExt8to16(v, config)
+	case OpSignExt8to32:
+		return rewriteValueAMD64_OpSignExt8to32(v, config)
+	case OpSignExt8to64:
+		return rewriteValueAMD64_OpSignExt8to64(v, config)
+	case OpSqrt:
+		return rewriteValueAMD64_OpSqrt(v, config)
+	case OpStaticCall:
+		return rewriteValueAMD64_OpStaticCall(v, config)
+	case OpStore:
+		return rewriteValueAMD64_OpStore(v, config)
+	case OpSub16:
+		return rewriteValueAMD64_OpSub16(v, config)
+	case OpSub32:
+		return rewriteValueAMD64_OpSub32(v, config)
+	case OpSub32F:
+		return rewriteValueAMD64_OpSub32F(v, config)
+	case OpSub64:
+		return rewriteValueAMD64_OpSub64(v, config)
+	case OpSub64F:
+		return rewriteValueAMD64_OpSub64F(v, config)
+	case OpSub8:
+		return rewriteValueAMD64_OpSub8(v, config)
+	case OpSubPtr:
+		return rewriteValueAMD64_OpSubPtr(v, config)
+	case OpTrunc16to8:
+		return rewriteValueAMD64_OpTrunc16to8(v, config)
+	case OpTrunc32to16:
+		return rewriteValueAMD64_OpTrunc32to16(v, config)
+	case OpTrunc32to8:
+		return rewriteValueAMD64_OpTrunc32to8(v, config)
+	case OpTrunc64to16:
+		return rewriteValueAMD64_OpTrunc64to16(v, config)
+	case OpTrunc64to32:
+		return rewriteValueAMD64_OpTrunc64to32(v, config)
+	case OpTrunc64to8:
+		return rewriteValueAMD64_OpTrunc64to8(v, config)
+	case OpAMD64XORB:
+		return rewriteValueAMD64_OpAMD64XORB(v, config)
+	case OpAMD64XORBconst:
+		return rewriteValueAMD64_OpAMD64XORBconst(v, config)
+	case OpAMD64XORL:
+		return rewriteValueAMD64_OpAMD64XORL(v, config)
+	case OpAMD64XORLconst:
+		return rewriteValueAMD64_OpAMD64XORLconst(v, config)
+	case OpAMD64XORQ:
+		return rewriteValueAMD64_OpAMD64XORQ(v, config)
+	case OpAMD64XORQconst:
+		return rewriteValueAMD64_OpAMD64XORQconst(v, config)
+	case OpAMD64XORW:
+		return rewriteValueAMD64_OpAMD64XORW(v, config)
+	case OpAMD64XORWconst:
+		return rewriteValueAMD64_OpAMD64XORWconst(v, config)
+	case OpXor16:
+		return rewriteValueAMD64_OpXor16(v, config)
+	case OpXor32:
+		return rewriteValueAMD64_OpXor32(v, config)
+	case OpXor64:
+		return rewriteValueAMD64_OpXor64(v, config)
+	case OpXor8:
+		return rewriteValueAMD64_OpXor8(v, config)
+	case OpZero:
+		return rewriteValueAMD64_OpZero(v, config)
+	case OpZeroExt16to32:
+		return rewriteValueAMD64_OpZeroExt16to32(v, config)
+	case OpZeroExt16to64:
+		return rewriteValueAMD64_OpZeroExt16to64(v, config)
+	case OpZeroExt32to64:
+		return rewriteValueAMD64_OpZeroExt32to64(v, config)
+	case OpZeroExt8to16:
+		return rewriteValueAMD64_OpZeroExt8to16(v, config)
+	case OpZeroExt8to32:
+		return rewriteValueAMD64_OpZeroExt8to32(v, config)
+	case OpZeroExt8to64:
+		return rewriteValueAMD64_OpZeroExt8to64(v, config)
+	}
+	return false
+}
+func rewriteValueAMD64_OpAMD64ADDB(v *Value, config *Config) bool {
+	b := v.Block
+	_ = b
+	// match: (ADDB x (MOVBconst [c]))
+	// cond:
+	// result: (ADDBconst [c] x)
+	for {
+		x := v.Args[0]
+		if v.Args[1].Op != OpAMD64MOVBconst {
+			break
+		}
+		c := v.Args[1].AuxInt
+		v.reset(OpAMD64ADDBconst)
+		v.AuxInt = c
+		v.AddArg(x)
+		return true
+	}
+	// match: (ADDB (MOVBconst [c]) x)
+	// cond:
+	// result: (ADDBconst [c] x)
+	for {
+		if v.Args[0].Op != OpAMD64MOVBconst {
+			break
+		}
+		c := v.Args[0].AuxInt
+		x := v.Args[1]
+		v.reset(OpAMD64ADDBconst)
+		v.AuxInt = c
+		v.AddArg(x)
+		return true
+	}
+	// match: (ADDB x (NEGB y))
+	// cond:
+	// result: (SUBB x y)
+	for {
+		x := v.Args[0]
+		if v.Args[1].Op != OpAMD64NEGB {
+			break
+		}
+		y := v.Args[1].Args[0]
+		v.reset(OpAMD64SUBB)
+		v.AddArg(x)
+		v.AddArg(y)
+		return true
+	}
+	return false
+}
+func rewriteValueAMD64_OpAMD64ADDBconst(v *Value, config *Config) bool {
+	b := v.Block
+	_ = b
+	// match: (ADDBconst [c] x)
+	// cond: int8(c)==0
+	// result: x
+	for {
+		c := v.AuxInt
+		x := v.Args[0]
+		if !(int8(c) == 0) {
+			break
+		}
+		v.reset(OpCopy)
+		v.Type = x.Type
+		v.AddArg(x)
+		return true
+	}
+	// match: (ADDBconst [c] (MOVBconst [d]))
+	// cond:
+	// result: (MOVBconst [c+d])
+	for {
+		c := v.AuxInt
+		if v.Args[0].Op != OpAMD64MOVBconst {
+			break
+		}
+		d := v.Args[0].AuxInt
+		v.reset(OpAMD64MOVBconst)
+		v.AuxInt = c + d
+		return true
+	}
+	// match: (ADDBconst [c] (ADDBconst [d] x))
+	// cond:
+	// result: (ADDBconst [c+d] x)
+	for {
+		c := v.AuxInt
+		if v.Args[0].Op != OpAMD64ADDBconst {
+			break
+		}
+		d := v.Args[0].AuxInt
+		x := v.Args[0].Args[0]
+		v.reset(OpAMD64ADDBconst)
+		v.AuxInt = c + d
+		v.AddArg(x)
+		return true
+	}
+	return false
+}
+func rewriteValueAMD64_OpAMD64ADDL(v *Value, config *Config) bool {
+	b := v.Block
+	_ = b
+	// match: (ADDL x (MOVLconst [c]))
+	// cond:
+	// result: (ADDLconst [c] x)
+	for {
+		x := v.Args[0]
+		if v.Args[1].Op != OpAMD64MOVLconst {
+			break
+		}
+		c := v.Args[1].AuxInt
+		v.reset(OpAMD64ADDLconst)
+		v.AuxInt = c
+		v.AddArg(x)
+		return true
+	}
+	// match: (ADDL (MOVLconst [c]) x)
+	// cond:
+	// result: (ADDLconst [c] x)
+	for {
+		if v.Args[0].Op != OpAMD64MOVLconst {
+			break
+		}
+		c := v.Args[0].AuxInt
+		x := v.Args[1]
+		v.reset(OpAMD64ADDLconst)
+		v.AuxInt = c
+		v.AddArg(x)
+		return true
+	}
+	// match: (ADDL x (NEGL y))
+	// cond:
+	// result: (SUBL x y)
+	for {
+		x := v.Args[0]
+		if v.Args[1].Op != OpAMD64NEGL {
+			break
+		}
+		y := v.Args[1].Args[0]
+		v.reset(OpAMD64SUBL)
+		v.AddArg(x)
+		v.AddArg(y)
+		return true
+	}
+	return false
+}
+func rewriteValueAMD64_OpAMD64ADDLconst(v *Value, config *Config) bool {
+	b := v.Block
+	_ = b
+	// match: (ADDLconst [c] x)
+	// cond: int32(c)==0
+	// result: x
+	for {
+		c := v.AuxInt
+		x := v.Args[0]
+		if !(int32(c) == 0) {
+			break
+		}
+		v.reset(OpCopy)
+		v.Type = x.Type
+		v.AddArg(x)
+		return true
+	}
+	// match: (ADDLconst [c] (MOVLconst [d]))
+	// cond:
+	// result: (MOVLconst [c+d])
+	for {
+		c := v.AuxInt
+		if v.Args[0].Op != OpAMD64MOVLconst {
+			break
+		}
+		d := v.Args[0].AuxInt
+		v.reset(OpAMD64MOVLconst)
+		v.AuxInt = c + d
+		return true
+	}
+	// match: (ADDLconst [c] (ADDLconst [d] x))
+	// cond:
+	// result: (ADDLconst [c+d] x)
+	for {
+		c := v.AuxInt
+		if v.Args[0].Op != OpAMD64ADDLconst {
+			break
+		}
+		d := v.Args[0].AuxInt
+		x := v.Args[0].Args[0]
+		v.reset(OpAMD64ADDLconst)
+		v.AuxInt = c + d
+		v.AddArg(x)
+		return true
+	}
+	return false
+}
+func rewriteValueAMD64_OpAMD64ADDQ(v *Value, config *Config) bool {
+	b := v.Block
+	_ = b
+	// match: (ADDQ x (MOVQconst [c]))
+	// cond: is32Bit(c)
+	// result: (ADDQconst [c] x)
+	for {
+		x := v.Args[0]
+		if v.Args[1].Op != OpAMD64MOVQconst {
+			break
+		}
+		c := v.Args[1].AuxInt
+		if !(is32Bit(c)) {
+			break
+		}
+		v.reset(OpAMD64ADDQconst)
+		v.AuxInt = c
+		v.AddArg(x)
+		return true
+	}
+	// match: (ADDQ (MOVQconst [c]) x)
+	// cond: is32Bit(c)
+	// result: (ADDQconst [c] x)
+	for {
+		if v.Args[0].Op != OpAMD64MOVQconst {
+			break
+		}
+		c := v.Args[0].AuxInt
+		x := v.Args[1]
+		if !(is32Bit(c)) {
+			break
+		}
+		v.reset(OpAMD64ADDQconst)
+		v.AuxInt = c
+		v.AddArg(x)
+		return true
+	}
+	// match: (ADDQ x (SHLQconst [3] y))
+	// cond:
+	// result: (LEAQ8 x y)
+	for {
+		x := v.Args[0]
+		if v.Args[1].Op != OpAMD64SHLQconst {
+			break
+		}
+		if v.Args[1].AuxInt != 3 {
+			break
+		}
+		y := v.Args[1].Args[0]
+		v.reset(OpAMD64LEAQ8)
+		v.AddArg(x)
+		v.AddArg(y)
+		return true
+	}
+	// match: (ADDQ x (SHLQconst [2] y))
+	// cond:
+	// result: (LEAQ4 x y)
+	for {
+		x := v.Args[0]
+		if v.Args[1].Op != OpAMD64SHLQconst {
+			break
+		}
+		if v.Args[1].AuxInt != 2 {
+			break
+		}
+		y := v.Args[1].Args[0]
+		v.reset(OpAMD64LEAQ4)
+		v.AddArg(x)
+		v.AddArg(y)
+		return true
+	}
+	// match: (ADDQ x (SHLQconst [1] y))
+	// cond:
+	// result: (LEAQ2 x y)
+	for {
+		x := v.Args[0]
+		if v.Args[1].Op != OpAMD64SHLQconst {
+			break
+		}
+		if v.Args[1].AuxInt != 1 {
+			break
+		}
+		y := v.Args[1].Args[0]
+		v.reset(OpAMD64LEAQ2)
+		v.AddArg(x)
+		v.AddArg(y)
+		return true
+	}
+	// match: (ADDQ x (ADDQ y y))
+	// cond:
+	// result: (LEAQ2 x y)
+	for {
+		x := v.Args[0]
+		if v.Args[1].Op != OpAMD64ADDQ {
+			break
+		}
+		y := v.Args[1].Args[0]
+		if v.Args[1].Args[1] != y {
+			break
+		}
+		v.reset(OpAMD64LEAQ2)
+		v.AddArg(x)
+		v.AddArg(y)
+		return true
+	}
+	// match: (ADDQ x (ADDQ x y))
+	// cond:
+	// result: (LEAQ2 y x)
+	for {
+		x := v.Args[0]
+		if v.Args[1].Op != OpAMD64ADDQ {
+			break
+		}
+		if v.Args[1].Args[0] != x {
+			break
+		}
+		y := v.Args[1].Args[1]
+		v.reset(OpAMD64LEAQ2)
+		v.AddArg(y)
+		v.AddArg(x)
+		return true
+	}
+	// match: (ADDQ x (ADDQ y x))
+	// cond:
+	// result: (LEAQ2 y x)
+	for {
+		x := v.Args[0]
+		if v.Args[1].Op != OpAMD64ADDQ {
+			break
+		}
+		y := v.Args[1].Args[0]
+		if v.Args[1].Args[1] != x {
+			break
+		}
+		v.reset(OpAMD64LEAQ2)
+		v.AddArg(y)
+		v.AddArg(x)
+		return true
+	}
+	// match: (ADDQ (ADDQconst [c] x) y)
+	// cond:
+	// result: (LEAQ1 [c] x y)
+	for {
+		if v.Args[0].Op != OpAMD64ADDQconst {
+			break
+		}
+		c := v.Args[0].AuxInt
+		x := v.Args[0].Args[0]
+		y := v.Args[1]
+		v.reset(OpAMD64LEAQ1)
+		v.AuxInt = c
+		v.AddArg(x)
+		v.AddArg(y)
+		return true
+	}
+	// match: (ADDQ x (ADDQconst [c] y))
+	// cond:
+	// result: (LEAQ1 [c] x y)
+	for {
+		x := v.Args[0]
+		if v.Args[1].Op != OpAMD64ADDQconst {
+			break
+		}
+		c := v.Args[1].AuxInt
+		y := v.Args[1].Args[0]
+		v.reset(OpAMD64LEAQ1)
+		v.AuxInt = c
+		v.AddArg(x)
+		v.AddArg(y)
+		return true
+	}
+	// match: (ADDQ x (LEAQ [c] {s} y))
+	// cond: x.Op != OpSB && y.Op != OpSB
+	// result: (LEAQ1 [c] {s} x y)
+	for {
+		x := v.Args[0]
+		if v.Args[1].Op != OpAMD64LEAQ {
+			break
+		}
+		c := v.Args[1].AuxInt
+		s := v.Args[1].Aux
+		y := v.Args[1].Args[0]
+		if !(x.Op != OpSB && y.Op != OpSB) {
+			break
+		}
+		v.reset(OpAMD64LEAQ1)
+		v.AuxInt = c
+		v.Aux = s
+		v.AddArg(x)
+		v.AddArg(y)
+		return true
+	}
+	// match: (ADDQ (LEAQ [c] {s} x) y)
+	// cond: x.Op != OpSB && y.Op != OpSB
+	// result: (LEAQ1 [c] {s} x y)
+	for {
+		if v.Args[0].Op != OpAMD64LEAQ {
+			break
+		}
+		c := v.Args[0].AuxInt
+		s := v.Args[0].Aux
+		x := v.Args[0].Args[0]
+		y := v.Args[1]
+		if !(x.Op != OpSB && y.Op != OpSB) {
+			break
+		}
+		v.reset(OpAMD64LEAQ1)
+		v.AuxInt = c
+		v.Aux = s
+		v.AddArg(x)
+		v.AddArg(y)
+		return true
+	}
+	// match: (ADDQ x (NEGQ y))
+	// cond:
+	// result: (SUBQ x y)
+	for {
+		x := v.Args[0]
+		if v.Args[1].Op != OpAMD64NEGQ {
+			break
+		}
+		y := v.Args[1].Args[0]
+		v.reset(OpAMD64SUBQ)
+		v.AddArg(x)
+		v.AddArg(y)
+		return true
+	}
+	return false
+}
+func rewriteValueAMD64_OpAMD64ADDQconst(v *Value, config *Config) bool {
+	b := v.Block
+	_ = b
+	// match: (ADDQconst [c] (ADDQ x y))
+	// cond:
+	// result: (LEAQ1 [c] x y)
+	for {
+		c := v.AuxInt
+		if v.Args[0].Op != OpAMD64ADDQ {
+			break
+		}
+		x := v.Args[0].Args[0]
+		y := v.Args[0].Args[1]
+		v.reset(OpAMD64LEAQ1)
+		v.AuxInt = c
+		v.AddArg(x)
+		v.AddArg(y)
+		return true
+	}
+	// match: (ADDQconst [c] (LEAQ [d] {s} x))
+	// cond:
+	// result: (LEAQ [c+d] {s} x)
+	for {
+		c := v.AuxInt
+		if v.Args[0].Op != OpAMD64LEAQ {
+			break
+		}
+		d := v.Args[0].AuxInt
+		s := v.Args[0].Aux
+		x := v.Args[0].Args[0]
+		v.reset(OpAMD64LEAQ)
+		v.AuxInt = c + d
+		v.Aux = s
+		v.AddArg(x)
+		return true
+	}
+	// match: (ADDQconst [c] (LEAQ1 [d] {s} x y))
+	// cond:
+	// result: (LEAQ1 [c+d] {s} x y)
+	for {
+		c := v.AuxInt
+		if v.Args[0].Op != OpAMD64LEAQ1 {
+			break
+		}
+		d := v.Args[0].AuxInt
+		s := v.Args[0].Aux
+		x := v.Args[0].Args[0]
+		y := v.Args[0].Args[1]
+		v.reset(OpAMD64LEAQ1)
+		v.AuxInt = c + d
+		v.Aux = s
+		v.AddArg(x)
+		v.AddArg(y)
+		return true
+	}
+	// match: (ADDQconst [c] (LEAQ2 [d] {s} x y))
+	// cond:
+	// result: (LEAQ2 [c+d] {s} x y)
+	for {
+		c := v.AuxInt
+		if v.Args[0].Op != OpAMD64LEAQ2 {
+			break
+		}
+		d := v.Args[0].AuxInt
+		s := v.Args[0].Aux
+		x := v.Args[0].Args[0]
+		y := v.Args[0].Args[1]
+		v.reset(OpAMD64LEAQ2)
+		v.AuxInt = c + d
+		v.Aux = s
+		v.AddArg(x)
+		v.AddArg(y)
+		return true
+	}
+	// match: (ADDQconst [c] (LEAQ4 [d] {s} x y))
+	// cond:
+	// result: (LEAQ4 [c+d] {s} x y)
+	for {
+		c := v.AuxInt
+		if v.Args[0].Op != OpAMD64LEAQ4 {
+			break
+		}
+		d := v.Args[0].AuxInt
+		s := v.Args[0].Aux
+		x := v.Args[0].Args[0]
+		y := v.Args[0].Args[1]
+		v.reset(OpAMD64LEAQ4)
+		v.AuxInt = c + d
+		v.Aux = s
+		v.AddArg(x)
+		v.AddArg(y)
+		return true
+	}
+	// match: (ADDQconst [c] (LEAQ8 [d] {s} x y))
+	// cond:
+	// result: (LEAQ8 [c+d] {s} x y)
+	for {
+		c := v.AuxInt
+		if v.Args[0].Op != OpAMD64LEAQ8 {
+			break
+		}
+		d := v.Args[0].AuxInt
+		s := v.Args[0].Aux
+		x := v.Args[0].Args[0]
+		y := v.Args[0].Args[1]
+		v.reset(OpAMD64LEAQ8)
+		v.AuxInt = c + d
+		v.Aux = s
+		v.AddArg(x)
+		v.AddArg(y)
+		return true
+	}
+	// match: (ADDQconst [0] x)
+	// cond:
+	// result: x
+	for {
+		if v.AuxInt != 0 {
+			break
+		}
+		x := v.Args[0]
+		v.reset(OpCopy)
+		v.Type = x.Type
+		v.AddArg(x)
+		return true
+	}
+	// match: (ADDQconst [c] (MOVQconst [d]))
+	// cond:
+	// result: (MOVQconst [c+d])
+	for {
+		c := v.AuxInt
+		if v.Args[0].Op != OpAMD64MOVQconst {
+			break
+		}
+		d := v.Args[0].AuxInt
+		v.reset(OpAMD64MOVQconst)
+		v.AuxInt = c + d
+		return true
+	}
+	// match: (ADDQconst [c] (ADDQconst [d] x))
+	// cond:
+	// result: (ADDQconst [c+d] x)
+	for {
+		c := v.AuxInt
+		if v.Args[0].Op != OpAMD64ADDQconst {
+			break
+		}
+		d := v.Args[0].AuxInt
+		x := v.Args[0].Args[0]
+		v.reset(OpAMD64ADDQconst)
+		v.AuxInt = c + d
+		v.AddArg(x)
+		return true
+	}
+	return false
+}
+func rewriteValueAMD64_OpAMD64ADDW(v *Value, config *Config) bool {
+	b := v.Block
+	_ = b
+	// match: (ADDW x (MOVWconst [c]))
+	// cond:
+	// result: (ADDWconst [c] x)
+	for {
+		x := v.Args[0]
+		if v.Args[1].Op != OpAMD64MOVWconst {
+			break
+		}
+		c := v.Args[1].AuxInt
+		v.reset(OpAMD64ADDWconst)
+		v.AuxInt = c
+		v.AddArg(x)
+		return true
+	}
+	// match: (ADDW (MOVWconst [c]) x)
+	// cond:
+	// result: (ADDWconst [c] x)
+	for {
+		if v.Args[0].Op != OpAMD64MOVWconst {
+			break
+		}
+		c := v.Args[0].AuxInt
+		x := v.Args[1]
+		v.reset(OpAMD64ADDWconst)
+		v.AuxInt = c
+		v.AddArg(x)
+		return true
+	}
+	// match: (ADDW x (NEGW y))
+	// cond:
+	// result: (SUBW x y)
+	for {
+		x := v.Args[0]
+		if v.Args[1].Op != OpAMD64NEGW {
+			break
+		}
+		y := v.Args[1].Args[0]
+		v.reset(OpAMD64SUBW)
+		v.AddArg(x)
+		v.AddArg(y)
+		return true
+	}
+	return false
+}
+func rewriteValueAMD64_OpAMD64ADDWconst(v *Value, config *Config) bool {
+	b := v.Block
+	_ = b
+	// match: (ADDWconst [c] x)
+	// cond: int16(c)==0
+	// result: x
+	for {
+		c := v.AuxInt
+		x := v.Args[0]
+		if !(int16(c) == 0) {
+			break
+		}
+		v.reset(OpCopy)
+		v.Type = x.Type
+		v.AddArg(x)
+		return true
+	}
+	// match: (ADDWconst [c] (MOVWconst [d]))
+	// cond:
+	// result: (MOVWconst [c+d])
+	for {
+		c := v.AuxInt
+		if v.Args[0].Op != OpAMD64MOVWconst {
+			break
+		}
+		d := v.Args[0].AuxInt
+		v.reset(OpAMD64MOVWconst)
+		v.AuxInt = c + d
+		return true
+	}
+	// match: (ADDWconst [c] (ADDWconst [d] x))
+	// cond:
+	// result: (ADDWconst [c+d] x)
+	for {
+		c := v.AuxInt
+		if v.Args[0].Op != OpAMD64ADDWconst {
+			break
+		}
+		d := v.Args[0].AuxInt
+		x := v.Args[0].Args[0]
+		v.reset(OpAMD64ADDWconst)
+		v.AuxInt = c + d
+		v.AddArg(x)
+		return true
+	}
+	return false
+}
+func rewriteValueAMD64_OpAMD64ANDB(v *Value, config *Config) bool {
+	b := v.Block
+	_ = b
+	// match: (ANDB x (MOVLconst [c]))
+	// cond:
+	// result: (ANDBconst [c] x)
+	for {
+		x := v.Args[0]
+		if v.Args[1].Op != OpAMD64MOVLconst {
+			break
+		}
+		c := v.Args[1].AuxInt
+		v.reset(OpAMD64ANDBconst)
+		v.AuxInt = c
+		v.AddArg(x)
+		return true
+	}
+	// match: (ANDB (MOVLconst [c]) x)
+	// cond:
+	// result: (ANDBconst [c] x)
+	for {
+		if v.Args[0].Op != OpAMD64MOVLconst {
+			break
+		}
+		c := v.Args[0].AuxInt
+		x := v.Args[1]
+		v.reset(OpAMD64ANDBconst)
+		v.AuxInt = c
+		v.AddArg(x)
+		return true
+	}
+	// match: (ANDB x (MOVBconst [c]))
+	// cond:
+	// result: (ANDBconst [c] x)
+	for {
+		x := v.Args[0]
+		if v.Args[1].Op != OpAMD64MOVBconst {
+			break
+		}
+		c := v.Args[1].AuxInt
+		v.reset(OpAMD64ANDBconst)
+		v.AuxInt = c
+		v.AddArg(x)
+		return true
+	}
+	// match: (ANDB (MOVBconst [c]) x)
+	// cond:
+	// result: (ANDBconst [c] x)
+	for {
+		if v.Args[0].Op != OpAMD64MOVBconst {
+			break
+		}
+		c := v.Args[0].AuxInt
+		x := v.Args[1]
+		v.reset(OpAMD64ANDBconst)
+		v.AuxInt = c
+		v.AddArg(x)
+		return true
+	}
+	// match: (ANDB x x)
+	// cond:
+	// result: x
+	for {
+		x := v.Args[0]
+		if v.Args[1] != x {
+			break
+		}
+		v.reset(OpCopy)
+		v.Type = x.Type
+		v.AddArg(x)
+		return true
+	}
+	return false
+}
+func rewriteValueAMD64_OpAMD64ANDBconst(v *Value, config *Config) bool {
+	b := v.Block
+	_ = b
+	// match: (ANDBconst [c] _)
+	// cond: int8(c)==0
+	// result: (MOVBconst [0])
+	for {
+		c := v.AuxInt
+		if !(int8(c) == 0) {
+			break
+		}
+		v.reset(OpAMD64MOVBconst)
+		v.AuxInt = 0
+		return true
+	}
+	// match: (ANDBconst [c] x)
+	// cond: int8(c)==-1
+	// result: x
+	for {
+		c := v.AuxInt
+		x := v.Args[0]
+		if !(int8(c) == -1) {
+			break
+		}
+		v.reset(OpCopy)
+		v.Type = x.Type
+		v.AddArg(x)
+		return true
+	}
+	// match: (ANDBconst [c] (MOVBconst [d]))
+	// cond:
+	// result: (MOVBconst [c&d])
+	for {
+		c := v.AuxInt
+		if v.Args[0].Op != OpAMD64MOVBconst {
+			break
+		}
+		d := v.Args[0].AuxInt
+		v.reset(OpAMD64MOVBconst)
+		v.AuxInt = c & d
+		return true
+	}
+	return false
+}
+func rewriteValueAMD64_OpAMD64ANDL(v *Value, config *Config) bool {
+	b := v.Block
+	_ = b
+	// match: (ANDL x (MOVLconst [c]))
+	// cond:
+	// result: (ANDLconst [c] x)
+	for {
+		x := v.Args[0]
+		if v.Args[1].Op != OpAMD64MOVLconst {
+			break
+		}
+		c := v.Args[1].AuxInt
+		v.reset(OpAMD64ANDLconst)
+		v.AuxInt = c
+		v.AddArg(x)
+		return true
+	}
+	// match: (ANDL (MOVLconst [c]) x)
+	// cond:
+	// result: (ANDLconst [c] x)
+	for {
+		if v.Args[0].Op != OpAMD64MOVLconst {
+			break
+		}
+		c := v.Args[0].AuxInt
+		x := v.Args[1]
+		v.reset(OpAMD64ANDLconst)
+		v.AuxInt = c
+		v.AddArg(x)
+		return true
+	}
+	// match: (ANDL x x)
+	// cond:
+	// result: x
+	for {
+		x := v.Args[0]
+		if v.Args[1] != x {
+			break
+		}
+		v.reset(OpCopy)
+		v.Type = x.Type
+		v.AddArg(x)
+		return true
+	}
+	return false
+}
+func rewriteValueAMD64_OpAMD64ANDLconst(v *Value, config *Config) bool {
+	b := v.Block
+	_ = b
+	// match: (ANDLconst [c] _)
+	// cond: int32(c)==0
+	// result: (MOVLconst [0])
+	for {
+		c := v.AuxInt
+		if !(int32(c) == 0) {
+			break
+		}
+		v.reset(OpAMD64MOVLconst)
+		v.AuxInt = 0
+		return true
+	}
+	// match: (ANDLconst [c] x)
+	// cond: int32(c)==-1
+	// result: x
+	for {
+		c := v.AuxInt
+		x := v.Args[0]
+		if !(int32(c) == -1) {
+			break
+		}
+		v.reset(OpCopy)
+		v.Type = x.Type
+		v.AddArg(x)
+		return true
+	}
+	// match: (ANDLconst [c] (MOVLconst [d]))
+	// cond:
+	// result: (MOVLconst [c&d])
+	for {
+		c := v.AuxInt
+		if v.Args[0].Op != OpAMD64MOVLconst {
+			break
+		}
+		d := v.Args[0].AuxInt
+		v.reset(OpAMD64MOVLconst)
+		v.AuxInt = c & d
+		return true
+	}
+	return false
+}
+func rewriteValueAMD64_OpAMD64ANDQ(v *Value, config *Config) bool {
+	b := v.Block
+	_ = b
+	// match: (ANDQ x (MOVQconst [c]))
+	// cond: is32Bit(c)
+	// result: (ANDQconst [c] x)
+	for {
+		x := v.Args[0]
+		if v.Args[1].Op != OpAMD64MOVQconst {
+			break
+		}
+		c := v.Args[1].AuxInt
+		if !(is32Bit(c)) {
+			break
+		}
+		v.reset(OpAMD64ANDQconst)
+		v.AuxInt = c
+		v.AddArg(x)
+		return true
+	}
+	// match: (ANDQ (MOVQconst [c]) x)
+	// cond: is32Bit(c)
+	// result: (ANDQconst [c] x)
+	for {
+		if v.Args[0].Op != OpAMD64MOVQconst {
+			break
+		}
+		c := v.Args[0].AuxInt
+		x := v.Args[1]
+		if !(is32Bit(c)) {
+			break
+		}
+		v.reset(OpAMD64ANDQconst)
+		v.AuxInt = c
+		v.AddArg(x)
+		return true
+	}
+	// match: (ANDQ x x)
+	// cond:
+	// result: x
+	for {
+		x := v.Args[0]
+		if v.Args[1] != x {
+			break
+		}
+		v.reset(OpCopy)
+		v.Type = x.Type
+		v.AddArg(x)
+		return true
+	}
+	return false
+}
+func rewriteValueAMD64_OpAMD64ANDQconst(v *Value, config *Config) bool {
+	b := v.Block
+	_ = b
+	// match: (ANDQconst [0] _)
+	// cond:
+	// result: (MOVQconst [0])
+	for {
+		if v.AuxInt != 0 {
+			break
+		}
+		v.reset(OpAMD64MOVQconst)
+		v.AuxInt = 0
+		return true
+	}
+	// match: (ANDQconst [-1] x)
+	// cond:
+	// result: x
+	for {
+		if v.AuxInt != -1 {
+			break
+		}
+		x := v.Args[0]
+		v.reset(OpCopy)
+		v.Type = x.Type
+		v.AddArg(x)
+		return true
+	}
+	// match: (ANDQconst [c] (MOVQconst [d]))
+	// cond:
+	// result: (MOVQconst [c&d])
+	for {
+		c := v.AuxInt
+		if v.Args[0].Op != OpAMD64MOVQconst {
+			break
+		}
+		d := v.Args[0].AuxInt
+		v.reset(OpAMD64MOVQconst)
+		v.AuxInt = c & d
+		return true
+	}
+	return false
+}
+func rewriteValueAMD64_OpAMD64ANDW(v *Value, config *Config) bool {
+	b := v.Block
+	_ = b
+	// match: (ANDW x (MOVLconst [c]))
+	// cond:
+	// result: (ANDWconst [c] x)
+	for {
+		x := v.Args[0]
+		if v.Args[1].Op != OpAMD64MOVLconst {
+			break
+		}
+		c := v.Args[1].AuxInt
+		v.reset(OpAMD64ANDWconst)
+		v.AuxInt = c
+		v.AddArg(x)
+		return true
+	}
+	// match: (ANDW (MOVLconst [c]) x)
+	// cond:
+	// result: (ANDWconst [c] x)
+	for {
+		if v.Args[0].Op != OpAMD64MOVLconst {
+			break
+		}
+		c := v.Args[0].AuxInt
+		x := v.Args[1]
+		v.reset(OpAMD64ANDWconst)
+		v.AuxInt = c
+		v.AddArg(x)
+		return true
+	}
+	// match: (ANDW x (MOVWconst [c]))
+	// cond:
+	// result: (ANDWconst [c] x)
+	for {
+		x := v.Args[0]
+		if v.Args[1].Op != OpAMD64MOVWconst {
+			break
+		}
+		c := v.Args[1].AuxInt
+		v.reset(OpAMD64ANDWconst)
+		v.AuxInt = c
+		v.AddArg(x)
+		return true
+	}
+	// match: (ANDW (MOVWconst [c]) x)
+	// cond:
+	// result: (ANDWconst [c] x)
+	for {
+		if v.Args[0].Op != OpAMD64MOVWconst {
+			break
+		}
+		c := v.Args[0].AuxInt
+		x := v.Args[1]
+		v.reset(OpAMD64ANDWconst)
+		v.AuxInt = c
+		v.AddArg(x)
+		return true
+	}
+	// match: (ANDW x x)
+	// cond:
+	// result: x
+	for {
+		x := v.Args[0]
+		if v.Args[1] != x {
+			break
+		}
+		v.reset(OpCopy)
+		v.Type = x.Type
+		v.AddArg(x)
+		return true
+	}
+	return false
+}
+func rewriteValueAMD64_OpAMD64ANDWconst(v *Value, config *Config) bool {
+	b := v.Block
+	_ = b
+	// match: (ANDWconst [c] _)
+	// cond: int16(c)==0
+	// result: (MOVWconst [0])
+	for {
+		c := v.AuxInt
+		if !(int16(c) == 0) {
+			break
+		}
+		v.reset(OpAMD64MOVWconst)
+		v.AuxInt = 0
+		return true
+	}
+	// match: (ANDWconst [c] x)
+	// cond: int16(c)==-1
+	// result: x
+	for {
+		c := v.AuxInt
+		x := v.Args[0]
+		if !(int16(c) == -1) {
+			break
+		}
+		v.reset(OpCopy)
+		v.Type = x.Type
+		v.AddArg(x)
+		return true
+	}
+	// match: (ANDWconst [c] (MOVWconst [d]))
+	// cond:
+	// result: (MOVWconst [c&d])
+	for {
+		c := v.AuxInt
+		if v.Args[0].Op != OpAMD64MOVWconst {
+			break
+		}
+		d := v.Args[0].AuxInt
+		v.reset(OpAMD64MOVWconst)
+		v.AuxInt = c & d
+		return true
+	}
+	return false
+}
+func rewriteValueAMD64_OpAdd16(v *Value, config *Config) bool {
+	b := v.Block
+	_ = b
+	// match: (Add16 x y)
+	// cond:
+	// result: (ADDW x y)
+	for {
+		x := v.Args[0]
+		y := v.Args[1]
+		v.reset(OpAMD64ADDW)
+		v.AddArg(x)
+		v.AddArg(y)
+		return true
+	}
+	return false
+}
+func rewriteValueAMD64_OpAdd32(v *Value, config *Config) bool {
+	b := v.Block
+	_ = b
+	// match: (Add32 x y)
+	// cond:
+	// result: (ADDL x y)
+	for {
+		x := v.Args[0]
+		y := v.Args[1]
+		v.reset(OpAMD64ADDL)
+		v.AddArg(x)
+		v.AddArg(y)
+		return true
+	}
+	return false
+}
+func rewriteValueAMD64_OpAdd32F(v *Value, config *Config) bool {
+	b := v.Block
+	_ = b
+	// match: (Add32F x y)
+	// cond:
+	// result: (ADDSS x y)
+	for {
+		x := v.Args[0]
+		y := v.Args[1]
+		v.reset(OpAMD64ADDSS)
+		v.AddArg(x)
+		v.AddArg(y)
+		return true
+	}
+	return false
+}
+func rewriteValueAMD64_OpAdd64(v *Value, config *Config) bool {
+	b := v.Block
+	_ = b
+	// match: (Add64 x y)
+	// cond:
+	// result: (ADDQ x y)
+	for {
+		x := v.Args[0]
+		y := v.Args[1]
+		v.reset(OpAMD64ADDQ)
+		v.AddArg(x)
+		v.AddArg(y)
+		return true
+	}
+	return false
+}
+func rewriteValueAMD64_OpAdd64F(v *Value, config *Config) bool {
+	b := v.Block
+	_ = b
+	// match: (Add64F x y)
+	// cond:
+	// result: (ADDSD x y)
+	for {
+		x := v.Args[0]
+		y := v.Args[1]
+		v.reset(OpAMD64ADDSD)
+		v.AddArg(x)
+		v.AddArg(y)
+		return true
+	}
+	return false
+}
+func rewriteValueAMD64_OpAdd8(v *Value, config *Config) bool {
+	b := v.Block
+	_ = b
+	// match: (Add8 x y)
+	// cond:
+	// result: (ADDB x y)
+	for {
+		x := v.Args[0]
+		y := v.Args[1]
+		v.reset(OpAMD64ADDB)
+		v.AddArg(x)
+		v.AddArg(y)
+		return true
+	}
+	return false
+}
+func rewriteValueAMD64_OpAddPtr(v *Value, config *Config) bool {
+	b := v.Block
+	_ = b
+	// match: (AddPtr x y)
+	// cond:
+	// result: (ADDQ x y)
+	for {
+		x := v.Args[0]
+		y := v.Args[1]
+		v.reset(OpAMD64ADDQ)
+		v.AddArg(x)
+		v.AddArg(y)
+		return true
+	}
+	return false
+}
+func rewriteValueAMD64_OpAddr(v *Value, config *Config) bool {
+	b := v.Block
+	_ = b
+	// match: (Addr {sym} base)
+	// cond:
+	// result: (LEAQ {sym} base)
+	for {
+		sym := v.Aux
+		base := v.Args[0]
+		v.reset(OpAMD64LEAQ)
+		v.Aux = sym
+		v.AddArg(base)
+		return true
+	}
+	return false
+}
+func rewriteValueAMD64_OpAnd16(v *Value, config *Config) bool {
+	b := v.Block
+	_ = b
+	// match: (And16 x y)
+	// cond:
+	// result: (ANDW x y)
+	for {
+		x := v.Args[0]
+		y := v.Args[1]
+		v.reset(OpAMD64ANDW)
+		v.AddArg(x)
+		v.AddArg(y)
+		return true
+	}
+	return false
+}
+func rewriteValueAMD64_OpAnd32(v *Value, config *Config) bool {
+	b := v.Block
+	_ = b
+	// match: (And32 x y)
+	// cond:
+	// result: (ANDL x y)
+	for {
+		x := v.Args[0]
+		y := v.Args[1]
+		v.reset(OpAMD64ANDL)
+		v.AddArg(x)
+		v.AddArg(y)
+		return true
+	}
+	return false
+}
+func rewriteValueAMD64_OpAnd64(v *Value, config *Config) bool {
+	b := v.Block
+	_ = b
+	// match: (And64 x y)
+	// cond:
+	// result: (ANDQ x y)
+	for {
+		x := v.Args[0]
+		y := v.Args[1]
+		v.reset(OpAMD64ANDQ)
+		v.AddArg(x)
+		v.AddArg(y)
+		return true
+	}
+	return false
+}
+func rewriteValueAMD64_OpAnd8(v *Value, config *Config) bool {
+	b := v.Block
+	_ = b
+	// match: (And8 x y)
+	// cond:
+	// result: (ANDB x y)
+	for {
+		x := v.Args[0]
+		y := v.Args[1]
+		v.reset(OpAMD64ANDB)
+		v.AddArg(x)
+		v.AddArg(y)
+		return true
+	}
+	return false
+}
+func rewriteValueAMD64_OpAvg64u(v *Value, config *Config) bool {
+	b := v.Block
+	_ = b
+	// match: (Avg64u x y)
+	// cond:
+	// result: (AVGQU x y)
+	for {
+		x := v.Args[0]
+		y := v.Args[1]
+		v.reset(OpAMD64AVGQU)
+		v.AddArg(x)
+		v.AddArg(y)
+		return true
+	}
+	return false
+}
+func rewriteValueAMD64_OpAMD64CMPB(v *Value, config *Config) bool {
+	b := v.Block
+	_ = b
+	// match: (CMPB x (MOVBconst [c]))
+	// cond:
+	// result: (CMPBconst x [c])
+	for {
+		x := v.Args[0]
+		if v.Args[1].Op != OpAMD64MOVBconst {
+			break
+		}
+		c := v.Args[1].AuxInt
+		v.reset(OpAMD64CMPBconst)
+		v.AddArg(x)
+		v.AuxInt = c
+		return true
+	}
+	// match: (CMPB (MOVBconst [c]) x)
+	// cond:
+	// result: (InvertFlags (CMPBconst x [c]))
+	for {
+		if v.Args[0].Op != OpAMD64MOVBconst {
+			break
+		}
+		c := v.Args[0].AuxInt
+		x := v.Args[1]
+		v.reset(OpAMD64InvertFlags)
+		v0 := b.NewValue0(v.Line, OpAMD64CMPBconst, TypeFlags)
+		v0.AddArg(x)
+		v0.AuxInt = c
+		v.AddArg(v0)
+		return true
+	}
+	return false
+}
+func rewriteValueAMD64_OpAMD64CMPBconst(v *Value, config *Config) bool {
+	b := v.Block
+	_ = b
+	// match: (CMPBconst (MOVBconst [x]) [y])
+	// cond: int8(x)==int8(y)
+	// result: (FlagEQ)
+	for {
+		if v.Args[0].Op != OpAMD64MOVBconst {
+			break
+		}
+		x := v.Args[0].AuxInt
+		y := v.AuxInt
+		if !(int8(x) == int8(y)) {
+			break
+		}
+		v.reset(OpAMD64FlagEQ)
+		return true
+	}
+	// match: (CMPBconst (MOVBconst [x]) [y])
+	// cond: int8(x)<int8(y) && uint8(x)<uint8(y)
+	// result: (FlagLT_ULT)
+	for {
+		if v.Args[0].Op != OpAMD64MOVBconst {
+			break
+		}
+		x := v.Args[0].AuxInt
+		y := v.AuxInt
+		if !(int8(x) < int8(y) && uint8(x) < uint8(y)) {
+			break
+		}
+		v.reset(OpAMD64FlagLT_ULT)
+		return true
+	}
+	// match: (CMPBconst (MOVBconst [x]) [y])
+	// cond: int8(x)<int8(y) && uint8(x)>uint8(y)
+	// result: (FlagLT_UGT)
+	for {
+		if v.Args[0].Op != OpAMD64MOVBconst {
+			break
+		}
+		x := v.Args[0].AuxInt
+		y := v.AuxInt
+		if !(int8(x) < int8(y) && uint8(x) > uint8(y)) {
+			break
+		}
+		v.reset(OpAMD64FlagLT_UGT)
+		return true
+	}
+	// match: (CMPBconst (MOVBconst [x]) [y])
+	// cond: int8(x)>int8(y) && uint8(x)<uint8(y)
+	// result: (FlagGT_ULT)
+	for {
+		if v.Args[0].Op != OpAMD64MOVBconst {
+			break
+		}
+		x := v.Args[0].AuxInt
+		y := v.AuxInt
+		if !(int8(x) > int8(y) && uint8(x) < uint8(y)) {
+			break
+		}
+		v.reset(OpAMD64FlagGT_ULT)
+		return true
+	}
+	// match: (CMPBconst (MOVBconst [x]) [y])
+	// cond: int8(x)>int8(y) && uint8(x)>uint8(y)
+	// result: (FlagGT_UGT)
+	for {
+		if v.Args[0].Op != OpAMD64MOVBconst {
+			break
+		}
+		x := v.Args[0].AuxInt
+		y := v.AuxInt
+		if !(int8(x) > int8(y) && uint8(x) > uint8(y)) {
+			break
+		}
+		v.reset(OpAMD64FlagGT_UGT)
+		return true
+	}
+	// match: (CMPBconst (ANDBconst _ [m]) [n])
+	// cond: int8(m)+1==int8(n) && isPowerOfTwo(int64(int8(n)))
+	// result: (FlagLT_ULT)
+	for {
+		if v.Args[0].Op != OpAMD64ANDBconst {
+			break
+		}
+		m := v.Args[0].AuxInt
+		n := v.AuxInt
+		if !(int8(m)+1 == int8(n) && isPowerOfTwo(int64(int8(n)))) {
+			break
+		}
+		v.reset(OpAMD64FlagLT_ULT)
+		return true
+	}
+	// match: (CMPBconst (ANDB x y) [0])
+	// cond:
+	// result: (TESTB x y)
+	for {
+		if v.Args[0].Op != OpAMD64ANDB {
+			break
+		}
+		x := v.Args[0].Args[0]
+		y := v.Args[0].Args[1]
+		if v.AuxInt != 0 {
+			break
+		}
+		v.reset(OpAMD64TESTB)
+		v.AddArg(x)
+		v.AddArg(y)
+		return true
+	}
+	// match: (CMPBconst (ANDBconst [c] x) [0])
+	// cond:
+	// result: (TESTBconst [c] x)
+	for {
+		if v.Args[0].Op != OpAMD64ANDBconst {
+			break
+		}
+		c := v.Args[0].AuxInt
+		x := v.Args[0].Args[0]
+		if v.AuxInt != 0 {
+			break
+		}
+		v.reset(OpAMD64TESTBconst)
+		v.AuxInt = c
+		v.AddArg(x)
+		return true
+	}
+	return false
+}
+func rewriteValueAMD64_OpAMD64CMPL(v *Value, config *Config) bool {
+	b := v.Block
+	_ = b
+	// match: (CMPL x (MOVLconst [c]))
+	// cond:
+	// result: (CMPLconst x [c])
+	for {
+		x := v.Args[0]
+		if v.Args[1].Op != OpAMD64MOVLconst {
+			break
+		}
+		c := v.Args[1].AuxInt
+		v.reset(OpAMD64CMPLconst)
+		v.AddArg(x)
+		v.AuxInt = c
+		return true
+	}
+	// match: (CMPL (MOVLconst [c]) x)
+	// cond:
+	// result: (InvertFlags (CMPLconst x [c]))
+	for {
+		if v.Args[0].Op != OpAMD64MOVLconst {
+			break
+		}
+		c := v.Args[0].AuxInt
+		x := v.Args[1]
+		v.reset(OpAMD64InvertFlags)
+		v0 := b.NewValue0(v.Line, OpAMD64CMPLconst, TypeFlags)
+		v0.AddArg(x)
+		v0.AuxInt = c
+		v.AddArg(v0)
+		return true
+	}
+	return false
+}
+func rewriteValueAMD64_OpAMD64CMPLconst(v *Value, config *Config) bool {
+	b := v.Block
+	_ = b
+	// match: (CMPLconst (MOVLconst [x]) [y])
+	// cond: int32(x)==int32(y)
+	// result: (FlagEQ)
+	for {
+		if v.Args[0].Op != OpAMD64MOVLconst {
+			break
+		}
+		x := v.Args[0].AuxInt
+		y := v.AuxInt
+		if !(int32(x) == int32(y)) {
+			break
+		}
+		v.reset(OpAMD64FlagEQ)
+		return true
+	}
+	// match: (CMPLconst (MOVLconst [x]) [y])
+	// cond: int32(x)<int32(y) && uint32(x)<uint32(y)
+	// result: (FlagLT_ULT)
+	for {
+		if v.Args[0].Op != OpAMD64MOVLconst {
+			break
+		}
+		x := v.Args[0].AuxInt
+		y := v.AuxInt
+		if !(int32(x) < int32(y) && uint32(x) < uint32(y)) {
+			break
+		}
+		v.reset(OpAMD64FlagLT_ULT)
+		return true
+	}
+	// match: (CMPLconst (MOVLconst [x]) [y])
+	// cond: int32(x)<int32(y) && uint32(x)>uint32(y)
+	// result: (FlagLT_UGT)
+	for {
+		if v.Args[0].Op != OpAMD64MOVLconst {
+			break
+		}
+		x := v.Args[0].AuxInt
+		y := v.AuxInt
+		if !(int32(x) < int32(y) && uint32(x) > uint32(y)) {
+			break
+		}
+		v.reset(OpAMD64FlagLT_UGT)
+		return true
+	}
+	// match: (CMPLconst (MOVLconst [x]) [y])
+	// cond: int32(x)>int32(y) && uint32(x)<uint32(y)
+	// result: (FlagGT_ULT)
+	for {
+		if v.Args[0].Op != OpAMD64MOVLconst {
+			break
+		}
+		x := v.Args[0].AuxInt
+		y := v.AuxInt
+		if !(int32(x) > int32(y) && uint32(x) < uint32(y)) {
+			break
+		}
+		v.reset(OpAMD64FlagGT_ULT)
+		return true
+	}
+	// match: (CMPLconst (MOVLconst [x]) [y])
+	// cond: int32(x)>int32(y) && uint32(x)>uint32(y)
+	// result: (FlagGT_UGT)
+	for {
+		if v.Args[0].Op != OpAMD64MOVLconst {
+			break
+		}
+		x := v.Args[0].AuxInt
+		y := v.AuxInt
+		if !(int32(x) > int32(y) && uint32(x) > uint32(y)) {
+			break
+		}
+		v.reset(OpAMD64FlagGT_UGT)
+		return true
+	}
+	// match: (CMPLconst (ANDLconst _ [m]) [n])
+	// cond: int32(m)+1==int32(n) && isPowerOfTwo(int64(int32(n)))
+	// result: (FlagLT_ULT)
+	for {
+		if v.Args[0].Op != OpAMD64ANDLconst {
+			break
+		}
+		m := v.Args[0].AuxInt
+		n := v.AuxInt
+		if !(int32(m)+1 == int32(n) && isPowerOfTwo(int64(int32(n)))) {
+			break
+		}
+		v.reset(OpAMD64FlagLT_ULT)
+		return true
+	}
+	// match: (CMPLconst (ANDL x y) [0])
+	// cond:
+	// result: (TESTL x y)
+	for {
+		if v.Args[0].Op != OpAMD64ANDL {
+			break
+		}
+		x := v.Args[0].Args[0]
+		y := v.Args[0].Args[1]
+		if v.AuxInt != 0 {
+			break
+		}
+		v.reset(OpAMD64TESTL)
+		v.AddArg(x)
+		v.AddArg(y)
+		return true
+	}
+	// match: (CMPLconst (ANDLconst [c] x) [0])
+	// cond:
+	// result: (TESTLconst [c] x)
+	for {
+		if v.Args[0].Op != OpAMD64ANDLconst {
+			break
+		}
+		c := v.Args[0].AuxInt
+		x := v.Args[0].Args[0]
+		if v.AuxInt != 0 {
+			break
+		}
+		v.reset(OpAMD64TESTLconst)
+		v.AuxInt = c
+		v.AddArg(x)
+		return true
+	}
+	return false
+}
+func rewriteValueAMD64_OpAMD64CMPQ(v *Value, config *Config) bool {
+	b := v.Block
+	_ = b
+	// match: (CMPQ x (MOVQconst [c]))
+	// cond: is32Bit(c)
+	// result: (CMPQconst x [c])
+	for {
+		x := v.Args[0]
+		if v.Args[1].Op != OpAMD64MOVQconst {
+			break
+		}
+		c := v.Args[1].AuxInt
+		if !(is32Bit(c)) {
+			break
+		}
+		v.reset(OpAMD64CMPQconst)
+		v.AddArg(x)
+		v.AuxInt = c
+		return true
+	}
+	// match: (CMPQ (MOVQconst [c]) x)
+	// cond: is32Bit(c)
+	// result: (InvertFlags (CMPQconst x [c]))
+	for {
+		if v.Args[0].Op != OpAMD64MOVQconst {
+			break
+		}
+		c := v.Args[0].AuxInt
+		x := v.Args[1]
+		if !(is32Bit(c)) {
+			break
+		}
+		v.reset(OpAMD64InvertFlags)
+		v0 := b.NewValue0(v.Line, OpAMD64CMPQconst, TypeFlags)
+		v0.AddArg(x)
+		v0.AuxInt = c
+		v.AddArg(v0)
+		return true
+	}
+	return false
+}
+func rewriteValueAMD64_OpAMD64CMPQconst(v *Value, config *Config) bool {
+	b := v.Block
+	_ = b
+	// match: (CMPQconst (MOVQconst [x]) [y])
+	// cond: x==y
+	// result: (FlagEQ)
+	for {
+		if v.Args[0].Op != OpAMD64MOVQconst {
+			break
+		}
+		x := v.Args[0].AuxInt
+		y := v.AuxInt
+		if !(x == y) {
+			break
+		}
+		v.reset(OpAMD64FlagEQ)
+		return true
+	}
+	// match: (CMPQconst (MOVQconst [x]) [y])
+	// cond: x<y && uint64(x)<uint64(y)
+	// result: (FlagLT_ULT)
+	for {
+		if v.Args[0].Op != OpAMD64MOVQconst {
+			break
+		}
+		x := v.Args[0].AuxInt
+		y := v.AuxInt
+		if !(x < y && uint64(x) < uint64(y)) {
+			break
+		}
+		v.reset(OpAMD64FlagLT_ULT)
+		return true
+	}
+	// match: (CMPQconst (MOVQconst [x]) [y])
+	// cond: x<y && uint64(x)>uint64(y)
+	// result: (FlagLT_UGT)
+	for {
+		if v.Args[0].Op != OpAMD64MOVQconst {
+			break
+		}
+		x := v.Args[0].AuxInt
+		y := v.AuxInt
+		if !(x < y && uint64(x) > uint64(y)) {
+			break
+		}
+		v.reset(OpAMD64FlagLT_UGT)
+		return true
+	}
+	// match: (CMPQconst (MOVQconst [x]) [y])
+	// cond: x>y && uint64(x)<uint64(y)
+	// result: (FlagGT_ULT)
+	for {
+		if v.Args[0].Op != OpAMD64MOVQconst {
+			break
+		}
+		x := v.Args[0].AuxInt
+		y := v.AuxInt
+		if !(x > y && uint64(x) < uint64(y)) {
+			break
+		}
+		v.reset(OpAMD64FlagGT_ULT)
+		return true
+	}
+	// match: (CMPQconst (MOVQconst [x]) [y])
+	// cond: x>y && uint64(x)>uint64(y)
+	// result: (FlagGT_UGT)
+	for {
+		if v.Args[0].Op != OpAMD64MOVQconst {
+			break
+		}
+		x := v.Args[0].AuxInt
+		y := v.AuxInt
+		if !(x > y && uint64(x) > uint64(y)) {
+			break
+		}
+		v.reset(OpAMD64FlagGT_UGT)
+		return true
+	}
+	// match: (CMPQconst (ANDQconst _ [m]) [n])
+	// cond: m+1==n && isPowerOfTwo(n)
+	// result: (FlagLT_ULT)
+	for {
+		if v.Args[0].Op != OpAMD64ANDQconst {
+			break
+		}
+		m := v.Args[0].AuxInt
+		n := v.AuxInt
+		if !(m+1 == n && isPowerOfTwo(n)) {
+			break
+		}
+		v.reset(OpAMD64FlagLT_ULT)
+		return true
+	}
+	// match: (CMPQconst (ANDQ x y) [0])
+	// cond:
+	// result: (TESTQ x y)
+	for {
+		if v.Args[0].Op != OpAMD64ANDQ {
+			break
+		}
+		x := v.Args[0].Args[0]
+		y := v.Args[0].Args[1]
+		if v.AuxInt != 0 {
+			break
+		}
+		v.reset(OpAMD64TESTQ)
+		v.AddArg(x)
+		v.AddArg(y)
+		return true
+	}
+	// match: (CMPQconst (ANDQconst [c] x) [0])
+	// cond:
+	// result: (TESTQconst [c] x)
+	for {
+		if v.Args[0].Op != OpAMD64ANDQconst {
+			break
+		}
+		c := v.Args[0].AuxInt
+		x := v.Args[0].Args[0]
+		if v.AuxInt != 0 {
+			break
+		}
+		v.reset(OpAMD64TESTQconst)
+		v.AuxInt = c
+		v.AddArg(x)
+		return true
+	}
+	return false
+}
+func rewriteValueAMD64_OpAMD64CMPW(v *Value, config *Config) bool {
+	b := v.Block
+	_ = b
+	// match: (CMPW x (MOVWconst [c]))
+	// cond:
+	// result: (CMPWconst x [c])
+	for {
+		x := v.Args[0]
+		if v.Args[1].Op != OpAMD64MOVWconst {
+			break
+		}
+		c := v.Args[1].AuxInt
+		v.reset(OpAMD64CMPWconst)
+		v.AddArg(x)
+		v.AuxInt = c
+		return true
+	}
+	// match: (CMPW (MOVWconst [c]) x)
+	// cond:
+	// result: (InvertFlags (CMPWconst x [c]))
+	for {
+		if v.Args[0].Op != OpAMD64MOVWconst {
+			break
+		}
+		c := v.Args[0].AuxInt
+		x := v.Args[1]
+		v.reset(OpAMD64InvertFlags)
+		v0 := b.NewValue0(v.Line, OpAMD64CMPWconst, TypeFlags)
+		v0.AddArg(x)
+		v0.AuxInt = c
+		v.AddArg(v0)
+		return true
+	}
+	return false
+}
+func rewriteValueAMD64_OpAMD64CMPWconst(v *Value, config *Config) bool {
+	b := v.Block
+	_ = b
+	// match: (CMPWconst (MOVWconst [x]) [y])
+	// cond: int16(x)==int16(y)
+	// result: (FlagEQ)
+	for {
+		if v.Args[0].Op != OpAMD64MOVWconst {
+			break
+		}
+		x := v.Args[0].AuxInt
+		y := v.AuxInt
+		if !(int16(x) == int16(y)) {
+			break
+		}
+		v.reset(OpAMD64FlagEQ)
+		return true
+	}
+	// match: (CMPWconst (MOVWconst [x]) [y])
+	// cond: int16(x)<int16(y) && uint16(x)<uint16(y)
+	// result: (FlagLT_ULT)
+	for {
+		if v.Args[0].Op != OpAMD64MOVWconst {
+			break
+		}
+		x := v.Args[0].AuxInt
+		y := v.AuxInt
+		if !(int16(x) < int16(y) && uint16(x) < uint16(y)) {
+			break
+		}
+		v.reset(OpAMD64FlagLT_ULT)
+		return true
+	}
+	// match: (CMPWconst (MOVWconst [x]) [y])
+	// cond: int16(x)<int16(y) && uint16(x)>uint16(y)
+	// result: (FlagLT_UGT)
+	for {
+		if v.Args[0].Op != OpAMD64MOVWconst {
+			break
+		}
+		x := v.Args[0].AuxInt
+		y := v.AuxInt
+		if !(int16(x) < int16(y) && uint16(x) > uint16(y)) {
+			break
+		}
+		v.reset(OpAMD64FlagLT_UGT)
+		return true
+	}
+	// match: (CMPWconst (MOVWconst [x]) [y])
+	// cond: int16(x)>int16(y) && uint16(x)<uint16(y)
+	// result: (FlagGT_ULT)
+	for {
+		if v.Args[0].Op != OpAMD64MOVWconst {
+			break
+		}
+		x := v.Args[0].AuxInt
+		y := v.AuxInt
+		if !(int16(x) > int16(y) && uint16(x) < uint16(y)) {
+			break
+		}
+		v.reset(OpAMD64FlagGT_ULT)
+		return true
+	}
+	// match: (CMPWconst (MOVWconst [x]) [y])
+	// cond: int16(x)>int16(y) && uint16(x)>uint16(y)
+	// result: (FlagGT_UGT)
+	for {
+		if v.Args[0].Op != OpAMD64MOVWconst {
+			break
+		}
+		x := v.Args[0].AuxInt
+		y := v.AuxInt
+		if !(int16(x) > int16(y) && uint16(x) > uint16(y)) {
+			break
+		}
+		v.reset(OpAMD64FlagGT_UGT)
+		return true
+	}
+	// match: (CMPWconst (ANDWconst _ [m]) [n])
+	// cond: int16(m)+1==int16(n) && isPowerOfTwo(int64(int16(n)))
+	// result: (FlagLT_ULT)
+	for {
+		if v.Args[0].Op != OpAMD64ANDWconst {
+			break
+		}
+		m := v.Args[0].AuxInt
+		n := v.AuxInt
+		if !(int16(m)+1 == int16(n) && isPowerOfTwo(int64(int16(n)))) {
+			break
+		}
+		v.reset(OpAMD64FlagLT_ULT)
+		return true
+	}
+	// match: (CMPWconst (ANDW x y) [0])
+	// cond:
+	// result: (TESTW x y)
+	for {
+		if v.Args[0].Op != OpAMD64ANDW {
+			break
+		}
+		x := v.Args[0].Args[0]
+		y := v.Args[0].Args[1]
+		if v.AuxInt != 0 {
+			break
+		}
+		v.reset(OpAMD64TESTW)
+		v.AddArg(x)
+		v.AddArg(y)
+		return true
+	}
+	// match: (CMPWconst (ANDWconst [c] x) [0])
+	// cond:
+	// result: (TESTWconst [c] x)
+	for {
+		if v.Args[0].Op != OpAMD64ANDWconst {
+			break
+		}
+		c := v.Args[0].AuxInt
+		x := v.Args[0].Args[0]
+		if v.AuxInt != 0 {
+			break
+		}
+		v.reset(OpAMD64TESTWconst)
+		v.AuxInt = c
+		v.AddArg(x)
+		return true
+	}
+	return false
+}
+func rewriteValueAMD64_OpClosureCall(v *Value, config *Config) bool {
+	b := v.Block
+	_ = b
+	// match: (ClosureCall [argwid] entry closure mem)
+	// cond:
+	// result: (CALLclosure [argwid] entry closure mem)
+	for {
+		argwid := v.AuxInt
+		entry := v.Args[0]
+		closure := v.Args[1]
+		mem := v.Args[2]
+		v.reset(OpAMD64CALLclosure)
+		v.AuxInt = argwid
+		v.AddArg(entry)
+		v.AddArg(closure)
+		v.AddArg(mem)
+		return true
+	}
+	return false
+}
+func rewriteValueAMD64_OpCom16(v *Value, config *Config) bool {
+	b := v.Block
+	_ = b
+	// match: (Com16 x)
+	// cond:
+	// result: (NOTW x)
+	for {
+		x := v.Args[0]
+		v.reset(OpAMD64NOTW)
+		v.AddArg(x)
+		return true
+	}
+	return false
+}
+func rewriteValueAMD64_OpCom32(v *Value, config *Config) bool {
+	b := v.Block
+	_ = b
+	// match: (Com32 x)
+	// cond:
+	// result: (NOTL x)
+	for {
+		x := v.Args[0]
+		v.reset(OpAMD64NOTL)
+		v.AddArg(x)
+		return true
+	}
+	return false
+}
+func rewriteValueAMD64_OpCom64(v *Value, config *Config) bool {
+	b := v.Block
+	_ = b
+	// match: (Com64 x)
+	// cond:
+	// result: (NOTQ x)
+	for {
+		x := v.Args[0]
+		v.reset(OpAMD64NOTQ)
+		v.AddArg(x)
+		return true
+	}
+	return false
+}
+func rewriteValueAMD64_OpCom8(v *Value, config *Config) bool {
+	b := v.Block
+	_ = b
+	// match: (Com8 x)
+	// cond:
+	// result: (NOTB x)
+	for {
+		x := v.Args[0]
+		v.reset(OpAMD64NOTB)
+		v.AddArg(x)
+		return true
+	}
+	return false
+}
+func rewriteValueAMD64_OpConst16(v *Value, config *Config) bool {
+	b := v.Block
+	_ = b
+	// match: (Const16 [val])
+	// cond:
+	// result: (MOVWconst [val])
+	for {
+		val := v.AuxInt
+		v.reset(OpAMD64MOVWconst)
+		v.AuxInt = val
+		return true
+	}
+	return false
+}
+func rewriteValueAMD64_OpConst32(v *Value, config *Config) bool {
+	b := v.Block
+	_ = b
+	// match: (Const32 [val])
+	// cond:
+	// result: (MOVLconst [val])
+	for {
+		val := v.AuxInt
+		v.reset(OpAMD64MOVLconst)
+		v.AuxInt = val
+		return true
+	}
+	return false
+}
+func rewriteValueAMD64_OpConst32F(v *Value, config *Config) bool {
+	b := v.Block
+	_ = b
+	// match: (Const32F [val])
+	// cond:
+	// result: (MOVSSconst [val])
+	for {
+		val := v.AuxInt
+		v.reset(OpAMD64MOVSSconst)
+		v.AuxInt = val
+		return true
+	}
+	return false
+}
+func rewriteValueAMD64_OpConst64(v *Value, config *Config) bool {
+	b := v.Block
+	_ = b
+	// match: (Const64 [val])
+	// cond:
+	// result: (MOVQconst [val])
+	for {
+		val := v.AuxInt
+		v.reset(OpAMD64MOVQconst)
+		v.AuxInt = val
+		return true
+	}
+	return false
+}
+func rewriteValueAMD64_OpConst64F(v *Value, config *Config) bool {
+	b := v.Block
+	_ = b
+	// match: (Const64F [val])
+	// cond:
+	// result: (MOVSDconst [val])
+	for {
+		val := v.AuxInt
+		v.reset(OpAMD64MOVSDconst)
+		v.AuxInt = val
+		return true
+	}
+	return false
+}
+func rewriteValueAMD64_OpConst8(v *Value, config *Config) bool {
+	b := v.Block
+	_ = b
+	// match: (Const8 [val])
+	// cond:
+	// result: (MOVBconst [val])
+	for {
+		val := v.AuxInt
+		v.reset(OpAMD64MOVBconst)
+		v.AuxInt = val
+		return true
+	}
+	return false
+}
+func rewriteValueAMD64_OpConstBool(v *Value, config *Config) bool {
+	b := v.Block
+	_ = b
+	// match: (ConstBool [b])
+	// cond:
+	// result: (MOVBconst [b])
+	for {
+		b := v.AuxInt
+		v.reset(OpAMD64MOVBconst)
+		v.AuxInt = b
+		return true
+	}
+	return false
+}
+func rewriteValueAMD64_OpConstNil(v *Value, config *Config) bool {
+	b := v.Block
+	_ = b
+	// match: (ConstNil)
+	// cond:
+	// result: (MOVQconst [0])
+	for {
+		v.reset(OpAMD64MOVQconst)
+		v.AuxInt = 0
+		return true
+	}
+	return false
+}
+func rewriteValueAMD64_OpConvert(v *Value, config *Config) bool {
+	b := v.Block
+	_ = b
+	// match: (Convert <t> x mem)
+	// cond:
+	// result: (MOVQconvert <t> x mem)
+	for {
+		t := v.Type
+		x := v.Args[0]
+		mem := v.Args[1]
+		v.reset(OpAMD64MOVQconvert)
+		v.Type = t
+		v.AddArg(x)
+		v.AddArg(mem)
+		return true
+	}
+	return false
+}
+func rewriteValueAMD64_OpCvt32Fto32(v *Value, config *Config) bool {
+	b := v.Block
+	_ = b
+	// match: (Cvt32Fto32 x)
+	// cond:
+	// result: (CVTTSS2SL x)
+	for {
+		x := v.Args[0]
+		v.reset(OpAMD64CVTTSS2SL)
+		v.AddArg(x)
+		return true
+	}
+	return false
+}
+func rewriteValueAMD64_OpCvt32Fto64(v *Value, config *Config) bool {
+	b := v.Block
+	_ = b
+	// match: (Cvt32Fto64 x)
+	// cond:
+	// result: (CVTTSS2SQ x)
+	for {
+		x := v.Args[0]
+		v.reset(OpAMD64CVTTSS2SQ)
+		v.AddArg(x)
+		return true
+	}
+	return false
+}
+func rewriteValueAMD64_OpCvt32Fto64F(v *Value, config *Config) bool {
+	b := v.Block
+	_ = b
+	// match: (Cvt32Fto64F x)
+	// cond:
+	// result: (CVTSS2SD x)
+	for {
+		x := v.Args[0]
+		v.reset(OpAMD64CVTSS2SD)
+		v.AddArg(x)
+		return true
+	}
+	return false
+}
+func rewriteValueAMD64_OpCvt32to32F(v *Value, config *Config) bool {
+	b := v.Block
+	_ = b
+	// match: (Cvt32to32F x)
+	// cond:
+	// result: (CVTSL2SS x)
+	for {
+		x := v.Args[0]
+		v.reset(OpAMD64CVTSL2SS)
+		v.AddArg(x)
+		return true
+	}
+	return false
+}
+func rewriteValueAMD64_OpCvt32to64F(v *Value, config *Config) bool {
+	b := v.Block
+	_ = b
+	// match: (Cvt32to64F x)
+	// cond:
+	// result: (CVTSL2SD x)
+	for {
+		x := v.Args[0]
+		v.reset(OpAMD64CVTSL2SD)
+		v.AddArg(x)
+		return true
+	}
+	return false
+}
+func rewriteValueAMD64_OpCvt64Fto32(v *Value, config *Config) bool {
+	b := v.Block
+	_ = b
+	// match: (Cvt64Fto32 x)
+	// cond:
+	// result: (CVTTSD2SL x)
+	for {
+		x := v.Args[0]
+		v.reset(OpAMD64CVTTSD2SL)
+		v.AddArg(x)
+		return true
+	}
+	return false
+}
+func rewriteValueAMD64_OpCvt64Fto32F(v *Value, config *Config) bool {
+	b := v.Block
+	_ = b
+	// match: (Cvt64Fto32F x)
+	// cond:
+	// result: (CVTSD2SS x)
+	for {
+		x := v.Args[0]
+		v.reset(OpAMD64CVTSD2SS)
+		v.AddArg(x)
+		return true
+	}
+	return false
+}
+func rewriteValueAMD64_OpCvt64Fto64(v *Value, config *Config) bool {
+	b := v.Block
+	_ = b
+	// match: (Cvt64Fto64 x)
+	// cond:
+	// result: (CVTTSD2SQ x)
+	for {
+		x := v.Args[0]
+		v.reset(OpAMD64CVTTSD2SQ)
+		v.AddArg(x)
+		return true
+	}
+	return false
+}
+func rewriteValueAMD64_OpCvt64to32F(v *Value, config *Config) bool {
+	b := v.Block
+	_ = b
+	// match: (Cvt64to32F x)
+	// cond:
+	// result: (CVTSQ2SS x)
+	for {
+		x := v.Args[0]
+		v.reset(OpAMD64CVTSQ2SS)
+		v.AddArg(x)
+		return true
+	}
+	return false
+}
+func rewriteValueAMD64_OpCvt64to64F(v *Value, config *Config) bool {
+	b := v.Block
+	_ = b
+	// match: (Cvt64to64F x)
+	// cond:
+	// result: (CVTSQ2SD x)
+	for {
+		x := v.Args[0]
+		v.reset(OpAMD64CVTSQ2SD)
+		v.AddArg(x)
+		return true
+	}
+	return false
+}
+func rewriteValueAMD64_OpDeferCall(v *Value, config *Config) bool {
+	b := v.Block
+	_ = b
+	// match: (DeferCall [argwid] mem)
+	// cond:
+	// result: (CALLdefer [argwid] mem)
+	for {
+		argwid := v.AuxInt
+		mem := v.Args[0]
+		v.reset(OpAMD64CALLdefer)
+		v.AuxInt = argwid
+		v.AddArg(mem)
+		return true
+	}
+	return false
+}
+func rewriteValueAMD64_OpDiv16(v *Value, config *Config) bool {
+	b := v.Block
+	_ = b
+	// match: (Div16 x y)
+	// cond:
+	// result: (DIVW x y)
+	for {
+		x := v.Args[0]
+		y := v.Args[1]
+		v.reset(OpAMD64DIVW)
+		v.AddArg(x)
+		v.AddArg(y)
+		return true
+	}
+	return false
+}
+func rewriteValueAMD64_OpDiv16u(v *Value, config *Config) bool {
+	b := v.Block
+	_ = b
+	// match: (Div16u x y)
+	// cond:
+	// result: (DIVWU x y)
+	for {
+		x := v.Args[0]
+		y := v.Args[1]
+		v.reset(OpAMD64DIVWU)
+		v.AddArg(x)
+		v.AddArg(y)
+		return true
+	}
+	return false
+}
+func rewriteValueAMD64_OpDiv32(v *Value, config *Config) bool {
+	b := v.Block
+	_ = b
+	// match: (Div32 x y)
+	// cond:
+	// result: (DIVL x y)
+	for {
+		x := v.Args[0]
+		y := v.Args[1]
+		v.reset(OpAMD64DIVL)
+		v.AddArg(x)
+		v.AddArg(y)
+		return true
+	}
+	return false
+}
+func rewriteValueAMD64_OpDiv32F(v *Value, config *Config) bool {
+	b := v.Block
+	_ = b
+	// match: (Div32F x y)
+	// cond:
+	// result: (DIVSS x y)
+	for {
+		x := v.Args[0]
+		y := v.Args[1]
+		v.reset(OpAMD64DIVSS)
+		v.AddArg(x)
+		v.AddArg(y)
+		return true
+	}
+	return false
+}
+func rewriteValueAMD64_OpDiv32u(v *Value, config *Config) bool {
+	b := v.Block
+	_ = b
+	// match: (Div32u x y)
+	// cond:
+	// result: (DIVLU x y)
+	for {
+		x := v.Args[0]
+		y := v.Args[1]
+		v.reset(OpAMD64DIVLU)
+		v.AddArg(x)
+		v.AddArg(y)
+		return true
+	}
+	return false
+}
+func rewriteValueAMD64_OpDiv64(v *Value, config *Config) bool {
+	b := v.Block
+	_ = b
+	// match: (Div64 x y)
+	// cond:
+	// result: (DIVQ x y)
+	for {
+		x := v.Args[0]
+		y := v.Args[1]
+		v.reset(OpAMD64DIVQ)
+		v.AddArg(x)
+		v.AddArg(y)
+		return true
+	}
+	return false
+}
+func rewriteValueAMD64_OpDiv64F(v *Value, config *Config) bool {
+	b := v.Block
+	_ = b
+	// match: (Div64F x y)
+	// cond:
+	// result: (DIVSD x y)
+	for {
+		x := v.Args[0]
+		y := v.Args[1]
+		v.reset(OpAMD64DIVSD)
+		v.AddArg(x)
+		v.AddArg(y)
+		return true
+	}
+	return false
+}
+func rewriteValueAMD64_OpDiv64u(v *Value, config *Config) bool {
+	b := v.Block
+	_ = b
+	// match: (Div64u x y)
+	// cond:
+	// result: (DIVQU x y)
+	for {
+		x := v.Args[0]
+		y := v.Args[1]
+		v.reset(OpAMD64DIVQU)
+		v.AddArg(x)
+		v.AddArg(y)
+		return true
+	}
+	return false
+}
+func rewriteValueAMD64_OpDiv8(v *Value, config *Config) bool {
+	b := v.Block
+	_ = b
+	// match: (Div8 x y)
+	// cond:
+	// result: (DIVW (SignExt8to16 x) (SignExt8to16 y))
+	for {
+		x := v.Args[0]
+		y := v.Args[1]
+		v.reset(OpAMD64DIVW)
+		v0 := b.NewValue0(v.Line, OpSignExt8to16, config.fe.TypeInt16())
+		v0.AddArg(x)
+		v.AddArg(v0)
+		v1 := b.NewValue0(v.Line, OpSignExt8to16, config.fe.TypeInt16())
+		v1.AddArg(y)
+		v.AddArg(v1)
+		return true
+	}
+	return false
+}
+func rewriteValueAMD64_OpDiv8u(v *Value, config *Config) bool {
+	b := v.Block
+	_ = b
+	// match: (Div8u x y)
+	// cond:
+	// result: (DIVWU (ZeroExt8to16 x) (ZeroExt8to16 y))
+	for {
+		x := v.Args[0]
+		y := v.Args[1]
+		v.reset(OpAMD64DIVWU)
+		v0 := b.NewValue0(v.Line, OpZeroExt8to16, config.fe.TypeUInt16())
+		v0.AddArg(x)
+		v.AddArg(v0)
+		v1 := b.NewValue0(v.Line, OpZeroExt8to16, config.fe.TypeUInt16())
+		v1.AddArg(y)
+		v.AddArg(v1)
+		return true
+	}
+	return false
+}
+func rewriteValueAMD64_OpEq16(v *Value, config *Config) bool {
+	b := v.Block
+	_ = b
+	// match: (Eq16 x y)
+	// cond:
+	// result: (SETEQ (CMPW x y))
+	for {
+		x := v.Args[0]
+		y := v.Args[1]
+		v.reset(OpAMD64SETEQ)
+		v0 := b.NewValue0(v.Line, OpAMD64CMPW, TypeFlags)
+		v0.AddArg(x)
+		v0.AddArg(y)
+		v.AddArg(v0)
+		return true
+	}
+	return false
+}
+func rewriteValueAMD64_OpEq32(v *Value, config *Config) bool {
+	b := v.Block
+	_ = b
+	// match: (Eq32 x y)
+	// cond:
+	// result: (SETEQ (CMPL x y))
+	for {
+		x := v.Args[0]
+		y := v.Args[1]
+		v.reset(OpAMD64SETEQ)
+		v0 := b.NewValue0(v.Line, OpAMD64CMPL, TypeFlags)
+		v0.AddArg(x)
+		v0.AddArg(y)
+		v.AddArg(v0)
+		return true
+	}
+	return false
+}
+func rewriteValueAMD64_OpEq32F(v *Value, config *Config) bool {
+	b := v.Block
+	_ = b
+	// match: (Eq32F x y)
+	// cond:
+	// result: (SETEQF (UCOMISS x y))
+	for {
+		x := v.Args[0]
+		y := v.Args[1]
+		v.reset(OpAMD64SETEQF)
+		v0 := b.NewValue0(v.Line, OpAMD64UCOMISS, TypeFlags)
+		v0.AddArg(x)
+		v0.AddArg(y)
+		v.AddArg(v0)
+		return true
+	}
+	return false
+}
+func rewriteValueAMD64_OpEq64(v *Value, config *Config) bool {
+	b := v.Block
+	_ = b
+	// match: (Eq64 x y)
+	// cond:
+	// result: (SETEQ (CMPQ x y))
+	for {
+		x := v.Args[0]
+		y := v.Args[1]
+		v.reset(OpAMD64SETEQ)
+		v0 := b.NewValue0(v.Line, OpAMD64CMPQ, TypeFlags)
+		v0.AddArg(x)
+		v0.AddArg(y)
+		v.AddArg(v0)
+		return true
+	}
+	return false
+}
+func rewriteValueAMD64_OpEq64F(v *Value, config *Config) bool {
+	b := v.Block
+	_ = b
+	// match: (Eq64F x y)
+	// cond:
+	// result: (SETEQF (UCOMISD x y))
+	for {
+		x := v.Args[0]
+		y := v.Args[1]
+		v.reset(OpAMD64SETEQF)
+		v0 := b.NewValue0(v.Line, OpAMD64UCOMISD, TypeFlags)
+		v0.AddArg(x)
+		v0.AddArg(y)
+		v.AddArg(v0)
+		return true
+	}
+	return false
+}
+func rewriteValueAMD64_OpEq8(v *Value, config *Config) bool {
+	b := v.Block
+	_ = b
+	// match: (Eq8 x y)
+	// cond:
+	// result: (SETEQ (CMPB x y))
+	for {
+		x := v.Args[0]
+		y := v.Args[1]
+		v.reset(OpAMD64SETEQ)
+		v0 := b.NewValue0(v.Line, OpAMD64CMPB, TypeFlags)
+		v0.AddArg(x)
+		v0.AddArg(y)
+		v.AddArg(v0)
+		return true
+	}
+	return false
+}
+func rewriteValueAMD64_OpEqPtr(v *Value, config *Config) bool {
+	b := v.Block
+	_ = b
+	// match: (EqPtr x y)
+	// cond:
+	// result: (SETEQ (CMPQ x y))
+	for {
+		x := v.Args[0]
+		y := v.Args[1]
+		v.reset(OpAMD64SETEQ)
+		v0 := b.NewValue0(v.Line, OpAMD64CMPQ, TypeFlags)
+		v0.AddArg(x)
+		v0.AddArg(y)
+		v.AddArg(v0)
+		return true
+	}
+	return false
+}
+func rewriteValueAMD64_OpGeq16(v *Value, config *Config) bool {
+	b := v.Block
+	_ = b
+	// match: (Geq16 x y)
+	// cond:
+	// result: (SETGE (CMPW x y))
+	for {
+		x := v.Args[0]
+		y := v.Args[1]
+		v.reset(OpAMD64SETGE)
+		v0 := b.NewValue0(v.Line, OpAMD64CMPW, TypeFlags)
+		v0.AddArg(x)
+		v0.AddArg(y)
+		v.AddArg(v0)
+		return true
+	}
+	return false
+}
+func rewriteValueAMD64_OpGeq16U(v *Value, config *Config) bool {
+	b := v.Block
+	_ = b
+	// match: (Geq16U x y)
+	// cond:
+	// result: (SETAE (CMPW x y))
+	for {
+		x := v.Args[0]
+		y := v.Args[1]
+		v.reset(OpAMD64SETAE)
+		v0 := b.NewValue0(v.Line, OpAMD64CMPW, TypeFlags)
+		v0.AddArg(x)
+		v0.AddArg(y)
+		v.AddArg(v0)
+		return true
+	}
+	return false
+}
+func rewriteValueAMD64_OpGeq32(v *Value, config *Config) bool {
+	b := v.Block
+	_ = b
+	// match: (Geq32 x y)
+	// cond:
+	// result: (SETGE (CMPL x y))
+	for {
+		x := v.Args[0]
+		y := v.Args[1]
+		v.reset(OpAMD64SETGE)
+		v0 := b.NewValue0(v.Line, OpAMD64CMPL, TypeFlags)
+		v0.AddArg(x)
+		v0.AddArg(y)
+		v.AddArg(v0)
+		return true
+	}
+	return false
+}
+func rewriteValueAMD64_OpGeq32F(v *Value, config *Config) bool {
+	b := v.Block
+	_ = b
+	// match: (Geq32F x y)
+	// cond:
+	// result: (SETGEF (UCOMISS x y))
+	for {
+		x := v.Args[0]
+		y := v.Args[1]
+		v.reset(OpAMD64SETGEF)
+		v0 := b.NewValue0(v.Line, OpAMD64UCOMISS, TypeFlags)
+		v0.AddArg(x)
+		v0.AddArg(y)
+		v.AddArg(v0)
+		return true
+	}
+	return false
+}
+func rewriteValueAMD64_OpGeq32U(v *Value, config *Config) bool {
+	b := v.Block
+	_ = b
+	// match: (Geq32U x y)
+	// cond:
+	// result: (SETAE (CMPL x y))
+	for {
+		x := v.Args[0]
+		y := v.Args[1]
+		v.reset(OpAMD64SETAE)
+		v0 := b.NewValue0(v.Line, OpAMD64CMPL, TypeFlags)
+		v0.AddArg(x)
+		v0.AddArg(y)
+		v.AddArg(v0)
+		return true
+	}
+	return false
+}
+func rewriteValueAMD64_OpGeq64(v *Value, config *Config) bool {
+	b := v.Block
+	_ = b
+	// match: (Geq64 x y)
+	// cond:
+	// result: (SETGE (CMPQ x y))
+	for {
+		x := v.Args[0]
+		y := v.Args[1]
+		v.reset(OpAMD64SETGE)
+		v0 := b.NewValue0(v.Line, OpAMD64CMPQ, TypeFlags)
+		v0.AddArg(x)
+		v0.AddArg(y)
+		v.AddArg(v0)
+		return true
+	}
+	return false
+}
+func rewriteValueAMD64_OpGeq64F(v *Value, config *Config) bool {
+	b := v.Block
+	_ = b
+	// match: (Geq64F x y)
+	// cond:
+	// result: (SETGEF (UCOMISD x y))
+	for {
+		x := v.Args[0]
+		y := v.Args[1]
+		v.reset(OpAMD64SETGEF)
+		v0 := b.NewValue0(v.Line, OpAMD64UCOMISD, TypeFlags)
+		v0.AddArg(x)
+		v0.AddArg(y)
+		v.AddArg(v0)
+		return true
+	}
+	return false
+}
+func rewriteValueAMD64_OpGeq64U(v *Value, config *Config) bool {
+	b := v.Block
+	_ = b
+	// match: (Geq64U x y)
+	// cond:
+	// result: (SETAE (CMPQ x y))
+	for {
+		x := v.Args[0]
+		y := v.Args[1]
+		v.reset(OpAMD64SETAE)
+		v0 := b.NewValue0(v.Line, OpAMD64CMPQ, TypeFlags)
+		v0.AddArg(x)
+		v0.AddArg(y)
+		v.AddArg(v0)
+		return true
+	}
+	return false
+}
+func rewriteValueAMD64_OpGeq8(v *Value, config *Config) bool {
+	b := v.Block
+	_ = b
+	// match: (Geq8 x y)
+	// cond:
+	// result: (SETGE (CMPB x y))
+	for {
+		x := v.Args[0]
+		y := v.Args[1]
+		v.reset(OpAMD64SETGE)
+		v0 := b.NewValue0(v.Line, OpAMD64CMPB, TypeFlags)
+		v0.AddArg(x)
+		v0.AddArg(y)
+		v.AddArg(v0)
+		return true
+	}
+	return false
+}
+func rewriteValueAMD64_OpGeq8U(v *Value, config *Config) bool {
+	b := v.Block
+	_ = b
+	// match: (Geq8U x y)
+	// cond:
+	// result: (SETAE (CMPB x y))
+	for {
+		x := v.Args[0]
+		y := v.Args[1]
+		v.reset(OpAMD64SETAE)
+		v0 := b.NewValue0(v.Line, OpAMD64CMPB, TypeFlags)
+		v0.AddArg(x)
+		v0.AddArg(y)
+		v.AddArg(v0)
+		return true
+	}
+	return false
+}
+func rewriteValueAMD64_OpGetClosurePtr(v *Value, config *Config) bool {
+	b := v.Block
+	_ = b
+	// match: (GetClosurePtr)
+	// cond:
+	// result: (LoweredGetClosurePtr)
+	for {
+		v.reset(OpAMD64LoweredGetClosurePtr)
+		return true
+	}
+	return false
+}
+func rewriteValueAMD64_OpGetG(v *Value, config *Config) bool {
+	b := v.Block
+	_ = b
+	// match: (GetG mem)
+	// cond:
+	// result: (LoweredGetG mem)
+	for {
+		mem := v.Args[0]
+		v.reset(OpAMD64LoweredGetG)
+		v.AddArg(mem)
+		return true
+	}
+	return false
+}
+func rewriteValueAMD64_OpGoCall(v *Value, config *Config) bool {
+	b := v.Block
+	_ = b
+	// match: (GoCall [argwid] mem)
+	// cond:
+	// result: (CALLgo [argwid] mem)
+	for {
+		argwid := v.AuxInt
+		mem := v.Args[0]
+		v.reset(OpAMD64CALLgo)
+		v.AuxInt = argwid
+		v.AddArg(mem)
+		return true
+	}
+	return false
+}
+func rewriteValueAMD64_OpGreater16(v *Value, config *Config) bool {
+	b := v.Block
+	_ = b
+	// match: (Greater16 x y)
+	// cond:
+	// result: (SETG (CMPW x y))
+	for {
+		x := v.Args[0]
+		y := v.Args[1]
+		v.reset(OpAMD64SETG)
+		v0 := b.NewValue0(v.Line, OpAMD64CMPW, TypeFlags)
+		v0.AddArg(x)
+		v0.AddArg(y)
+		v.AddArg(v0)
+		return true
+	}
+	return false
+}
+func rewriteValueAMD64_OpGreater16U(v *Value, config *Config) bool {
+	b := v.Block
+	_ = b
+	// match: (Greater16U x y)
+	// cond:
+	// result: (SETA (CMPW x y))
+	for {
+		x := v.Args[0]
+		y := v.Args[1]
+		v.reset(OpAMD64SETA)
+		v0 := b.NewValue0(v.Line, OpAMD64CMPW, TypeFlags)
+		v0.AddArg(x)
+		v0.AddArg(y)
+		v.AddArg(v0)
+		return true
+	}
+	return false
+}
+func rewriteValueAMD64_OpGreater32(v *Value, config *Config) bool {
+	b := v.Block
+	_ = b
+	// match: (Greater32 x y)
+	// cond:
+	// result: (SETG (CMPL x y))
+	for {
+		x := v.Args[0]
+		y := v.Args[1]
+		v.reset(OpAMD64SETG)
+		v0 := b.NewValue0(v.Line, OpAMD64CMPL, TypeFlags)
+		v0.AddArg(x)
+		v0.AddArg(y)
+		v.AddArg(v0)
+		return true
+	}
+	return false
+}
+func rewriteValueAMD64_OpGreater32F(v *Value, config *Config) bool {
+	b := v.Block
+	_ = b
+	// match: (Greater32F x y)
+	// cond:
+	// result: (SETGF (UCOMISS x y))
+	for {
+		x := v.Args[0]
+		y := v.Args[1]
+		v.reset(OpAMD64SETGF)
+		v0 := b.NewValue0(v.Line, OpAMD64UCOMISS, TypeFlags)
+		v0.AddArg(x)
+		v0.AddArg(y)
+		v.AddArg(v0)
+		return true
+	}
+	return false
+}
+func rewriteValueAMD64_OpGreater32U(v *Value, config *Config) bool {
+	b := v.Block
+	_ = b
+	// match: (Greater32U x y)
+	// cond:
+	// result: (SETA (CMPL x y))
+	for {
+		x := v.Args[0]
+		y := v.Args[1]
+		v.reset(OpAMD64SETA)
+		v0 := b.NewValue0(v.Line, OpAMD64CMPL, TypeFlags)
+		v0.AddArg(x)
+		v0.AddArg(y)
+		v.AddArg(v0)
+		return true
+	}
+	return false
+}
+func rewriteValueAMD64_OpGreater64(v *Value, config *Config) bool {
+	b := v.Block
+	_ = b
+	// match: (Greater64 x y)
+	// cond:
+	// result: (SETG (CMPQ x y))
+	for {
+		x := v.Args[0]
+		y := v.Args[1]
+		v.reset(OpAMD64SETG)
+		v0 := b.NewValue0(v.Line, OpAMD64CMPQ, TypeFlags)
+		v0.AddArg(x)
+		v0.AddArg(y)
+		v.AddArg(v0)
+		return true
+	}
+	return false
+}
+func rewriteValueAMD64_OpGreater64F(v *Value, config *Config) bool {
+	b := v.Block
+	_ = b
+	// match: (Greater64F x y)
+	// cond:
+	// result: (SETGF (UCOMISD x y))
+	for {
+		x := v.Args[0]
+		y := v.Args[1]
+		v.reset(OpAMD64SETGF)
+		v0 := b.NewValue0(v.Line, OpAMD64UCOMISD, TypeFlags)
+		v0.AddArg(x)
+		v0.AddArg(y)
+		v.AddArg(v0)
+		return true
+	}
+	return false
+}
+func rewriteValueAMD64_OpGreater64U(v *Value, config *Config) bool {
+	b := v.Block
+	_ = b
+	// match: (Greater64U x y)
+	// cond:
+	// result: (SETA (CMPQ x y))
+	for {
+		x := v.Args[0]
+		y := v.Args[1]
+		v.reset(OpAMD64SETA)
+		v0 := b.NewValue0(v.Line, OpAMD64CMPQ, TypeFlags)
+		v0.AddArg(x)
+		v0.AddArg(y)
+		v.AddArg(v0)
+		return true
+	}
+	return false
+}
+func rewriteValueAMD64_OpGreater8(v *Value, config *Config) bool {
+	b := v.Block
+	_ = b
+	// match: (Greater8 x y)
+	// cond:
+	// result: (SETG (CMPB x y))
+	for {
+		x := v.Args[0]
+		y := v.Args[1]
+		v.reset(OpAMD64SETG)
+		v0 := b.NewValue0(v.Line, OpAMD64CMPB, TypeFlags)
+		v0.AddArg(x)
+		v0.AddArg(y)
+		v.AddArg(v0)
+		return true
+	}
+	return false
+}
+func rewriteValueAMD64_OpGreater8U(v *Value, config *Config) bool {
+	b := v.Block
+	_ = b
+	// match: (Greater8U x y)
+	// cond:
+	// result: (SETA (CMPB x y))
+	for {
+		x := v.Args[0]
+		y := v.Args[1]
+		v.reset(OpAMD64SETA)
+		v0 := b.NewValue0(v.Line, OpAMD64CMPB, TypeFlags)
+		v0.AddArg(x)
+		v0.AddArg(y)
+		v.AddArg(v0)
+		return true
+	}
+	return false
+}
+func rewriteValueAMD64_OpHmul16(v *Value, config *Config) bool {
+	b := v.Block
+	_ = b
+	// match: (Hmul16 x y)
+	// cond:
+	// result: (HMULW x y)
+	for {
+		x := v.Args[0]
+		y := v.Args[1]
+		v.reset(OpAMD64HMULW)
+		v.AddArg(x)
+		v.AddArg(y)
+		return true
+	}
+	return false
+}
+func rewriteValueAMD64_OpHmul16u(v *Value, config *Config) bool {
+	b := v.Block
+	_ = b
+	// match: (Hmul16u x y)
+	// cond:
+	// result: (HMULWU x y)
+	for {
+		x := v.Args[0]
+		y := v.Args[1]
+		v.reset(OpAMD64HMULWU)
+		v.AddArg(x)
+		v.AddArg(y)
+		return true
+	}
+	return false
+}
+func rewriteValueAMD64_OpHmul32(v *Value, config *Config) bool {
+	b := v.Block
+	_ = b
+	// match: (Hmul32 x y)
+	// cond:
+	// result: (HMULL x y)
+	for {
+		x := v.Args[0]
+		y := v.Args[1]
+		v.reset(OpAMD64HMULL)
+		v.AddArg(x)
+		v.AddArg(y)
+		return true
+	}
+	return false
+}
+func rewriteValueAMD64_OpHmul32u(v *Value, config *Config) bool {
+	b := v.Block
+	_ = b
+	// match: (Hmul32u x y)
+	// cond:
+	// result: (HMULLU x y)
+	for {
+		x := v.Args[0]
+		y := v.Args[1]
+		v.reset(OpAMD64HMULLU)
+		v.AddArg(x)
+		v.AddArg(y)
+		return true
+	}
+	return false
+}
+func rewriteValueAMD64_OpHmul64(v *Value, config *Config) bool {
+	b := v.Block
+	_ = b
+	// match: (Hmul64 x y)
+	// cond:
+	// result: (HMULQ x y)
+	for {
+		x := v.Args[0]
+		y := v.Args[1]
+		v.reset(OpAMD64HMULQ)
+		v.AddArg(x)
+		v.AddArg(y)
+		return true
+	}
+	return false
+}
+func rewriteValueAMD64_OpHmul64u(v *Value, config *Config) bool {
+	b := v.Block
+	_ = b
+	// match: (Hmul64u x y)
+	// cond:
+	// result: (HMULQU x y)
+	for {
+		x := v.Args[0]
+		y := v.Args[1]
+		v.reset(OpAMD64HMULQU)
+		v.AddArg(x)
+		v.AddArg(y)
+		return true
+	}
+	return false
+}
+func rewriteValueAMD64_OpHmul8(v *Value, config *Config) bool {
+	b := v.Block
+	_ = b
+	// match: (Hmul8 x y)
+	// cond:
+	// result: (HMULB x y)
+	for {
+		x := v.Args[0]
+		y := v.Args[1]
+		v.reset(OpAMD64HMULB)
+		v.AddArg(x)
+		v.AddArg(y)
+		return true
+	}
+	return false
+}
+func rewriteValueAMD64_OpHmul8u(v *Value, config *Config) bool {
+	b := v.Block
+	_ = b
+	// match: (Hmul8u x y)
+	// cond:
+	// result: (HMULBU x y)
+	for {
+		x := v.Args[0]
+		y := v.Args[1]
+		v.reset(OpAMD64HMULBU)
+		v.AddArg(x)
+		v.AddArg(y)
+		return true
+	}
+	return false
+}
+func rewriteValueAMD64_OpITab(v *Value, config *Config) bool {
+	b := v.Block
+	_ = b
+	// match: (ITab (Load ptr mem))
+	// cond:
+	// result: (MOVQload ptr mem)
+	for {
+		if v.Args[0].Op != OpLoad {
+			break
+		}
+		ptr := v.Args[0].Args[0]
+		mem := v.Args[0].Args[1]
+		v.reset(OpAMD64MOVQload)
+		v.AddArg(ptr)
+		v.AddArg(mem)
+		return true
+	}
+	return false
+}
+func rewriteValueAMD64_OpInterCall(v *Value, config *Config) bool {
+	b := v.Block
+	_ = b
+	// match: (InterCall [argwid] entry mem)
+	// cond:
+	// result: (CALLinter [argwid] entry mem)
+	for {
+		argwid := v.AuxInt
+		entry := v.Args[0]
+		mem := v.Args[1]
+		v.reset(OpAMD64CALLinter)
+		v.AuxInt = argwid
+		v.AddArg(entry)
+		v.AddArg(mem)
+		return true
+	}
+	return false
+}
+func rewriteValueAMD64_OpIsInBounds(v *Value, config *Config) bool {
+	b := v.Block
+	_ = b
+	// match: (IsInBounds idx len)
+	// cond:
+	// result: (SETB (CMPQ idx len))
+	for {
+		idx := v.Args[0]
+		len := v.Args[1]
+		v.reset(OpAMD64SETB)
+		v0 := b.NewValue0(v.Line, OpAMD64CMPQ, TypeFlags)
+		v0.AddArg(idx)
+		v0.AddArg(len)
+		v.AddArg(v0)
+		return true
+	}
+	return false
+}
+func rewriteValueAMD64_OpIsNonNil(v *Value, config *Config) bool {
+	b := v.Block
+	_ = b
+	// match: (IsNonNil p)
+	// cond:
+	// result: (SETNE (TESTQ p p))
+	for {
+		p := v.Args[0]
+		v.reset(OpAMD64SETNE)
+		v0 := b.NewValue0(v.Line, OpAMD64TESTQ, TypeFlags)
+		v0.AddArg(p)
+		v0.AddArg(p)
+		v.AddArg(v0)
+		return true
+	}
+	return false
+}
+func rewriteValueAMD64_OpIsSliceInBounds(v *Value, config *Config) bool {
+	b := v.Block
+	_ = b
+	// match: (IsSliceInBounds idx len)
+	// cond:
+	// result: (SETBE (CMPQ idx len))
+	for {
+		idx := v.Args[0]
+		len := v.Args[1]
+		v.reset(OpAMD64SETBE)
+		v0 := b.NewValue0(v.Line, OpAMD64CMPQ, TypeFlags)
+		v0.AddArg(idx)
+		v0.AddArg(len)
+		v.AddArg(v0)
+		return true
+	}
+	return false
+}
+func rewriteValueAMD64_OpAMD64LEAQ(v *Value, config *Config) bool {
+	b := v.Block
+	_ = b
+	// match: (LEAQ [c] {s} (ADDQconst [d] x))
+	// cond:
+	// result: (LEAQ [c+d] {s} x)
+	for {
+		c := v.AuxInt
+		s := v.Aux
+		if v.Args[0].Op != OpAMD64ADDQconst {
+			break
+		}
+		d := v.Args[0].AuxInt
+		x := v.Args[0].Args[0]
+		v.reset(OpAMD64LEAQ)
+		v.AuxInt = c + d
+		v.Aux = s
+		v.AddArg(x)
+		return true
+	}
+	// match: (LEAQ [c] {s} (ADDQ x y))
+	// cond: x.Op != OpSB && y.Op != OpSB
+	// result: (LEAQ1 [c] {s} x y)
+	for {
+		c := v.AuxInt
+		s := v.Aux
+		if v.Args[0].Op != OpAMD64ADDQ {
+			break
+		}
+		x := v.Args[0].Args[0]
+		y := v.Args[0].Args[1]
+		if !(x.Op != OpSB && y.Op != OpSB) {
+			break
+		}
+		v.reset(OpAMD64LEAQ1)
+		v.AuxInt = c
+		v.Aux = s
+		v.AddArg(x)
+		v.AddArg(y)
+		return true
+	}
+	// match: (LEAQ [off1] {sym1} (LEAQ [off2] {sym2} x))
+	// cond: canMergeSym(sym1, sym2)
+	// result: (LEAQ [addOff(off1,off2)] {mergeSym(sym1,sym2)} x)
+	for {
+		off1 := v.AuxInt
+		sym1 := v.Aux
+		if v.Args[0].Op != OpAMD64LEAQ {
+			break
+		}
+		off2 := v.Args[0].AuxInt
+		sym2 := v.Args[0].Aux
+		x := v.Args[0].Args[0]
+		if !(canMergeSym(sym1, sym2)) {
+			break
+		}
+		v.reset(OpAMD64LEAQ)
+		v.AuxInt = addOff(off1, off2)
+		v.Aux = mergeSym(sym1, sym2)
+		v.AddArg(x)
+		return true
+	}
+	// match: (LEAQ [off1] {sym1} (LEAQ1 [off2] {sym2} x y))
+	// cond: canMergeSym(sym1, sym2)
+	// result: (LEAQ1 [addOff(off1,off2)] {mergeSym(sym1,sym2)} x y)
+	for {
+		off1 := v.AuxInt
+		sym1 := v.Aux
+		if v.Args[0].Op != OpAMD64LEAQ1 {
+			break
+		}
+		off2 := v.Args[0].AuxInt
+		sym2 := v.Args[0].Aux
+		x := v.Args[0].Args[0]
+		y := v.Args[0].Args[1]
+		if !(canMergeSym(sym1, sym2)) {
+			break
+		}
+		v.reset(OpAMD64LEAQ1)
+		v.AuxInt = addOff(off1, off2)
+		v.Aux = mergeSym(sym1, sym2)
+		v.AddArg(x)
+		v.AddArg(y)
+		return true
+	}
+	// match: (LEAQ [off1] {sym1} (LEAQ2 [off2] {sym2} x y))
+	// cond: canMergeSym(sym1, sym2)
+	// result: (LEAQ2 [addOff(off1,off2)] {mergeSym(sym1,sym2)} x y)
+	for {
+		off1 := v.AuxInt
+		sym1 := v.Aux
+		if v.Args[0].Op != OpAMD64LEAQ2 {
+			break
+		}
+		off2 := v.Args[0].AuxInt
+		sym2 := v.Args[0].Aux
+		x := v.Args[0].Args[0]
+		y := v.Args[0].Args[1]
+		if !(canMergeSym(sym1, sym2)) {
+			break
+		}
+		v.reset(OpAMD64LEAQ2)
+		v.AuxInt = addOff(off1, off2)
+		v.Aux = mergeSym(sym1, sym2)
+		v.AddArg(x)
+		v.AddArg(y)
+		return true
+	}
+	// match: (LEAQ [off1] {sym1} (LEAQ4 [off2] {sym2} x y))
+	// cond: canMergeSym(sym1, sym2)
+	// result: (LEAQ4 [addOff(off1,off2)] {mergeSym(sym1,sym2)} x y)
+	for {
+		off1 := v.AuxInt
+		sym1 := v.Aux
+		if v.Args[0].Op != OpAMD64LEAQ4 {
+			break
+		}
+		off2 := v.Args[0].AuxInt
+		sym2 := v.Args[0].Aux
+		x := v.Args[0].Args[0]
+		y := v.Args[0].Args[1]
+		if !(canMergeSym(sym1, sym2)) {
+			break
+		}
+		v.reset(OpAMD64LEAQ4)
+		v.AuxInt = addOff(off1, off2)
+		v.Aux = mergeSym(sym1, sym2)
+		v.AddArg(x)
+		v.AddArg(y)
+		return true
+	}
+	// match: (LEAQ [off1] {sym1} (LEAQ8 [off2] {sym2} x y))
+	// cond: canMergeSym(sym1, sym2)
+	// result: (LEAQ8 [addOff(off1,off2)] {mergeSym(sym1,sym2)} x y)
+	for {
+		off1 := v.AuxInt
+		sym1 := v.Aux
+		if v.Args[0].Op != OpAMD64LEAQ8 {
+			break
+		}
+		off2 := v.Args[0].AuxInt
+		sym2 := v.Args[0].Aux
+		x := v.Args[0].Args[0]
+		y := v.Args[0].Args[1]
+		if !(canMergeSym(sym1, sym2)) {
+			break
+		}
+		v.reset(OpAMD64LEAQ8)
+		v.AuxInt = addOff(off1, off2)
+		v.Aux = mergeSym(sym1, sym2)
+		v.AddArg(x)
+		v.AddArg(y)
+		return true
+	}
+	return false
+}
+func rewriteValueAMD64_OpAMD64LEAQ1(v *Value, config *Config) bool {
+	b := v.Block
+	_ = b
+	// match: (LEAQ1 [c] {s} (ADDQconst [d] x) y)
+	// cond: x.Op != OpSB
+	// result: (LEAQ1 [c+d] {s} x y)
+	for {
+		c := v.AuxInt
+		s := v.Aux
+		if v.Args[0].Op != OpAMD64ADDQconst {
+			break
+		}
+		d := v.Args[0].AuxInt
+		x := v.Args[0].Args[0]
+		y := v.Args[1]
+		if !(x.Op != OpSB) {
+			break
+		}
+		v.reset(OpAMD64LEAQ1)
+		v.AuxInt = c + d
+		v.Aux = s
+		v.AddArg(x)
+		v.AddArg(y)
+		return true
+	}
+	// match: (LEAQ1 [c] {s} x (ADDQconst [d] y))
+	// cond: y.Op != OpSB
+	// result: (LEAQ1 [c+d] {s} x y)
+	for {
+		c := v.AuxInt
+		s := v.Aux
+		x := v.Args[0]
+		if v.Args[1].Op != OpAMD64ADDQconst {
+			break
+		}
+		d := v.Args[1].AuxInt
+		y := v.Args[1].Args[0]
+		if !(y.Op != OpSB) {
+			break
+		}
+		v.reset(OpAMD64LEAQ1)
+		v.AuxInt = c + d
+		v.Aux = s
+		v.AddArg(x)
+		v.AddArg(y)
+		return true
+	}
+	// match: (LEAQ1 [off1] {sym1} (LEAQ [off2] {sym2} x) y)
+	// cond: canMergeSym(sym1, sym2) && x.Op != OpSB
+	// result: (LEAQ1 [addOff(off1,off2)] {mergeSym(sym1,sym2)} x y)
+	for {
+		off1 := v.AuxInt
+		sym1 := v.Aux
+		if v.Args[0].Op != OpAMD64LEAQ {
+			break
+		}
+		off2 := v.Args[0].AuxInt
+		sym2 := v.Args[0].Aux
+		x := v.Args[0].Args[0]
+		y := v.Args[1]
+		if !(canMergeSym(sym1, sym2) && x.Op != OpSB) {
+			break
+		}
+		v.reset(OpAMD64LEAQ1)
+		v.AuxInt = addOff(off1, off2)
+		v.Aux = mergeSym(sym1, sym2)
+		v.AddArg(x)
+		v.AddArg(y)
+		return true
+	}
+	// match: (LEAQ1 [off1] {sym1} x (LEAQ [off2] {sym2} y))
+	// cond: canMergeSym(sym1, sym2) && y.Op != OpSB
+	// result: (LEAQ1 [addOff(off1,off2)] {mergeSym(sym1,sym2)} x y)
+	for {
+		off1 := v.AuxInt
+		sym1 := v.Aux
+		x := v.Args[0]
+		if v.Args[1].Op != OpAMD64LEAQ {
+			break
+		}
+		off2 := v.Args[1].AuxInt
+		sym2 := v.Args[1].Aux
+		y := v.Args[1].Args[0]
+		if !(canMergeSym(sym1, sym2) && y.Op != OpSB) {
+			break
+		}
+		v.reset(OpAMD64LEAQ1)
+		v.AuxInt = addOff(off1, off2)
+		v.Aux = mergeSym(sym1, sym2)
+		v.AddArg(x)
+		v.AddArg(y)
+		return true
+	}
+	return false
+}
+func rewriteValueAMD64_OpAMD64LEAQ2(v *Value, config *Config) bool {
+	b := v.Block
+	_ = b
+	// match: (LEAQ2 [c] {s} (ADDQconst [d] x) y)
+	// cond: x.Op != OpSB
+	// result: (LEAQ2 [c+d] {s} x y)
+	for {
+		c := v.AuxInt
+		s := v.Aux
+		if v.Args[0].Op != OpAMD64ADDQconst {
+			break
+		}
+		d := v.Args[0].AuxInt
+		x := v.Args[0].Args[0]
+		y := v.Args[1]
+		if !(x.Op != OpSB) {
+			break
+		}
+		v.reset(OpAMD64LEAQ2)
+		v.AuxInt = c + d
+		v.Aux = s
+		v.AddArg(x)
+		v.AddArg(y)
+		return true
+	}
+	// match: (LEAQ2 [c] {s} x (ADDQconst [d] y))
+	// cond: y.Op != OpSB
+	// result: (LEAQ2 [c+2*d] {s} x y)
+	for {
+		c := v.AuxInt
+		s := v.Aux
+		x := v.Args[0]
+		if v.Args[1].Op != OpAMD64ADDQconst {
+			break
+		}
+		d := v.Args[1].AuxInt
+		y := v.Args[1].Args[0]
+		if !(y.Op != OpSB) {
+			break
+		}
+		v.reset(OpAMD64LEAQ2)
+		v.AuxInt = c + 2*d
+		v.Aux = s
+		v.AddArg(x)
+		v.AddArg(y)
+		return true
+	}
+	// match: (LEAQ2 [off1] {sym1} (LEAQ [off2] {sym2} x) y)
+	// cond: canMergeSym(sym1, sym2) && x.Op != OpSB
+	// result: (LEAQ2 [addOff(off1,off2)] {mergeSym(sym1,sym2)} x y)
+	for {
+		off1 := v.AuxInt
+		sym1 := v.Aux
+		if v.Args[0].Op != OpAMD64LEAQ {
+			break
+		}
+		off2 := v.Args[0].AuxInt
+		sym2 := v.Args[0].Aux
+		x := v.Args[0].Args[0]
+		y := v.Args[1]
+		if !(canMergeSym(sym1, sym2) && x.Op != OpSB) {
+			break
+		}
+		v.reset(OpAMD64LEAQ2)
+		v.AuxInt = addOff(off1, off2)
+		v.Aux = mergeSym(sym1, sym2)
+		v.AddArg(x)
+		v.AddArg(y)
+		return true
+	}
+	return false
+}
+func rewriteValueAMD64_OpAMD64LEAQ4(v *Value, config *Config) bool {
+	b := v.Block
+	_ = b
+	// match: (LEAQ4 [c] {s} (ADDQconst [d] x) y)
+	// cond: x.Op != OpSB
+	// result: (LEAQ4 [c+d] {s} x y)
+	for {
+		c := v.AuxInt
+		s := v.Aux
+		if v.Args[0].Op != OpAMD64ADDQconst {
+			break
+		}
+		d := v.Args[0].AuxInt
+		x := v.Args[0].Args[0]
+		y := v.Args[1]
+		if !(x.Op != OpSB) {
+			break
+		}
+		v.reset(OpAMD64LEAQ4)
+		v.AuxInt = c + d
+		v.Aux = s
+		v.AddArg(x)
+		v.AddArg(y)
+		return true
+	}
+	// match: (LEAQ4 [c] {s} x (ADDQconst [d] y))
+	// cond: y.Op != OpSB
+	// result: (LEAQ4 [c+4*d] {s} x y)
+	for {
+		c := v.AuxInt
+		s := v.Aux
+		x := v.Args[0]
+		if v.Args[1].Op != OpAMD64ADDQconst {
+			break
+		}
+		d := v.Args[1].AuxInt
+		y := v.Args[1].Args[0]
+		if !(y.Op != OpSB) {
+			break
+		}
+		v.reset(OpAMD64LEAQ4)
+		v.AuxInt = c + 4*d
+		v.Aux = s
+		v.AddArg(x)
+		v.AddArg(y)
+		return true
+	}
+	// match: (LEAQ4 [off1] {sym1} (LEAQ [off2] {sym2} x) y)
+	// cond: canMergeSym(sym1, sym2) && x.Op != OpSB
+	// result: (LEAQ4 [addOff(off1,off2)] {mergeSym(sym1,sym2)} x y)
+	for {
+		off1 := v.AuxInt
+		sym1 := v.Aux
+		if v.Args[0].Op != OpAMD64LEAQ {
+			break
+		}
+		off2 := v.Args[0].AuxInt
+		sym2 := v.Args[0].Aux
+		x := v.Args[0].Args[0]
+		y := v.Args[1]
+		if !(canMergeSym(sym1, sym2) && x.Op != OpSB) {
+			break
+		}
+		v.reset(OpAMD64LEAQ4)
+		v.AuxInt = addOff(off1, off2)
+		v.Aux = mergeSym(sym1, sym2)
+		v.AddArg(x)
+		v.AddArg(y)
+		return true
+	}
+	return false
+}
+func rewriteValueAMD64_OpAMD64LEAQ8(v *Value, config *Config) bool {
+	b := v.Block
+	_ = b
+	// match: (LEAQ8 [c] {s} (ADDQconst [d] x) y)
+	// cond: x.Op != OpSB
+	// result: (LEAQ8 [c+d] {s} x y)
+	for {
+		c := v.AuxInt
+		s := v.Aux
+		if v.Args[0].Op != OpAMD64ADDQconst {
+			break
+		}
+		d := v.Args[0].AuxInt
+		x := v.Args[0].Args[0]
+		y := v.Args[1]
+		if !(x.Op != OpSB) {
+			break
+		}
+		v.reset(OpAMD64LEAQ8)
+		v.AuxInt = c + d
+		v.Aux = s
+		v.AddArg(x)
+		v.AddArg(y)
+		return true
+	}
+	// match: (LEAQ8 [c] {s} x (ADDQconst [d] y))
+	// cond: y.Op != OpSB
+	// result: (LEAQ8 [c+8*d] {s} x y)
+	for {
+		c := v.AuxInt
+		s := v.Aux
+		x := v.Args[0]
+		if v.Args[1].Op != OpAMD64ADDQconst {
+			break
+		}
+		d := v.Args[1].AuxInt
+		y := v.Args[1].Args[0]
+		if !(y.Op != OpSB) {
+			break
+		}
+		v.reset(OpAMD64LEAQ8)
+		v.AuxInt = c + 8*d
+		v.Aux = s
+		v.AddArg(x)
+		v.AddArg(y)
+		return true
+	}
+	// match: (LEAQ8 [off1] {sym1} (LEAQ [off2] {sym2} x) y)
+	// cond: canMergeSym(sym1, sym2) && x.Op != OpSB
+	// result: (LEAQ8 [addOff(off1,off2)] {mergeSym(sym1,sym2)} x y)
+	for {
+		off1 := v.AuxInt
+		sym1 := v.Aux
+		if v.Args[0].Op != OpAMD64LEAQ {
+			break
+		}
+		off2 := v.Args[0].AuxInt
+		sym2 := v.Args[0].Aux
+		x := v.Args[0].Args[0]
+		y := v.Args[1]
+		if !(canMergeSym(sym1, sym2) && x.Op != OpSB) {
+			break
+		}
+		v.reset(OpAMD64LEAQ8)
+		v.AuxInt = addOff(off1, off2)
+		v.Aux = mergeSym(sym1, sym2)
+		v.AddArg(x)
+		v.AddArg(y)
+		return true
+	}
+	return false
+}
+func rewriteValueAMD64_OpLeq16(v *Value, config *Config) bool {
+	b := v.Block
+	_ = b
+	// match: (Leq16 x y)
+	// cond:
+	// result: (SETLE (CMPW x y))
+	for {
+		x := v.Args[0]
+		y := v.Args[1]
+		v.reset(OpAMD64SETLE)
+		v0 := b.NewValue0(v.Line, OpAMD64CMPW, TypeFlags)
+		v0.AddArg(x)
+		v0.AddArg(y)
+		v.AddArg(v0)
+		return true
+	}
+	return false
+}
+func rewriteValueAMD64_OpLeq16U(v *Value, config *Config) bool {
+	b := v.Block
+	_ = b
+	// match: (Leq16U x y)
+	// cond:
+	// result: (SETBE (CMPW x y))
+	for {
+		x := v.Args[0]
+		y := v.Args[1]
+		v.reset(OpAMD64SETBE)
+		v0 := b.NewValue0(v.Line, OpAMD64CMPW, TypeFlags)
+		v0.AddArg(x)
+		v0.AddArg(y)
+		v.AddArg(v0)
+		return true
+	}
+	return false
+}
+func rewriteValueAMD64_OpLeq32(v *Value, config *Config) bool {
+	b := v.Block
+	_ = b
+	// match: (Leq32 x y)
+	// cond:
+	// result: (SETLE (CMPL x y))
+	for {
+		x := v.Args[0]
+		y := v.Args[1]
+		v.reset(OpAMD64SETLE)
+		v0 := b.NewValue0(v.Line, OpAMD64CMPL, TypeFlags)
+		v0.AddArg(x)
+		v0.AddArg(y)
+		v.AddArg(v0)
+		return true
+	}
+	return false
+}
+func rewriteValueAMD64_OpLeq32F(v *Value, config *Config) bool {
+	b := v.Block
+	_ = b
+	// match: (Leq32F x y)
+	// cond:
+	// result: (SETGEF (UCOMISS y x))
+	for {
+		x := v.Args[0]
+		y := v.Args[1]
+		v.reset(OpAMD64SETGEF)
+		v0 := b.NewValue0(v.Line, OpAMD64UCOMISS, TypeFlags)
+		v0.AddArg(y)
+		v0.AddArg(x)
+		v.AddArg(v0)
+		return true
+	}
+	return false
+}
+func rewriteValueAMD64_OpLeq32U(v *Value, config *Config) bool {
+	b := v.Block
+	_ = b
+	// match: (Leq32U x y)
+	// cond:
+	// result: (SETBE (CMPL x y))
+	for {
+		x := v.Args[0]
+		y := v.Args[1]
+		v.reset(OpAMD64SETBE)
+		v0 := b.NewValue0(v.Line, OpAMD64CMPL, TypeFlags)
+		v0.AddArg(x)
+		v0.AddArg(y)
+		v.AddArg(v0)
+		return true
+	}
+	return false
+}
+func rewriteValueAMD64_OpLeq64(v *Value, config *Config) bool {
+	b := v.Block
+	_ = b
+	// match: (Leq64 x y)
+	// cond:
+	// result: (SETLE (CMPQ x y))
+	for {
+		x := v.Args[0]
+		y := v.Args[1]
+		v.reset(OpAMD64SETLE)
+		v0 := b.NewValue0(v.Line, OpAMD64CMPQ, TypeFlags)
+		v0.AddArg(x)
+		v0.AddArg(y)
+		v.AddArg(v0)
+		return true
+	}
+	return false
+}
+func rewriteValueAMD64_OpLeq64F(v *Value, config *Config) bool {
+	b := v.Block
+	_ = b
+	// match: (Leq64F x y)
+	// cond:
+	// result: (SETGEF (UCOMISD y x))
+	for {
+		x := v.Args[0]
+		y := v.Args[1]
+		v.reset(OpAMD64SETGEF)
+		v0 := b.NewValue0(v.Line, OpAMD64UCOMISD, TypeFlags)
+		v0.AddArg(y)
+		v0.AddArg(x)
+		v.AddArg(v0)
+		return true
+	}
+	return false
+}
+func rewriteValueAMD64_OpLeq64U(v *Value, config *Config) bool {
+	b := v.Block
+	_ = b
+	// match: (Leq64U x y)
+	// cond:
+	// result: (SETBE (CMPQ x y))
+	for {
+		x := v.Args[0]
+		y := v.Args[1]
+		v.reset(OpAMD64SETBE)
+		v0 := b.NewValue0(v.Line, OpAMD64CMPQ, TypeFlags)
+		v0.AddArg(x)
+		v0.AddArg(y)
+		v.AddArg(v0)
+		return true
+	}
+	return false
+}
+func rewriteValueAMD64_OpLeq8(v *Value, config *Config) bool {
+	b := v.Block
+	_ = b
+	// match: (Leq8 x y)
+	// cond:
+	// result: (SETLE (CMPB x y))
+	for {
+		x := v.Args[0]
+		y := v.Args[1]
+		v.reset(OpAMD64SETLE)
+		v0 := b.NewValue0(v.Line, OpAMD64CMPB, TypeFlags)
+		v0.AddArg(x)
+		v0.AddArg(y)
+		v.AddArg(v0)
+		return true
+	}
+	return false
+}
+func rewriteValueAMD64_OpLeq8U(v *Value, config *Config) bool {
+	b := v.Block
+	_ = b
+	// match: (Leq8U x y)
+	// cond:
+	// result: (SETBE (CMPB x y))
+	for {
+		x := v.Args[0]
+		y := v.Args[1]
+		v.reset(OpAMD64SETBE)
+		v0 := b.NewValue0(v.Line, OpAMD64CMPB, TypeFlags)
+		v0.AddArg(x)
+		v0.AddArg(y)
+		v.AddArg(v0)
+		return true
+	}
+	return false
+}
+func rewriteValueAMD64_OpLess16(v *Value, config *Config) bool {
+	b := v.Block
+	_ = b
+	// match: (Less16 x y)
+	// cond:
+	// result: (SETL (CMPW x y))
+	for {
+		x := v.Args[0]
+		y := v.Args[1]
+		v.reset(OpAMD64SETL)
+		v0 := b.NewValue0(v.Line, OpAMD64CMPW, TypeFlags)
+		v0.AddArg(x)
+		v0.AddArg(y)
+		v.AddArg(v0)
+		return true
+	}
+	return false
+}
+func rewriteValueAMD64_OpLess16U(v *Value, config *Config) bool {
+	b := v.Block
+	_ = b
+	// match: (Less16U x y)
+	// cond:
+	// result: (SETB (CMPW x y))
+	for {
+		x := v.Args[0]
+		y := v.Args[1]
+		v.reset(OpAMD64SETB)
+		v0 := b.NewValue0(v.Line, OpAMD64CMPW, TypeFlags)
+		v0.AddArg(x)
+		v0.AddArg(y)
+		v.AddArg(v0)
+		return true
+	}
+	return false
+}
+func rewriteValueAMD64_OpLess32(v *Value, config *Config) bool {
+	b := v.Block
+	_ = b
+	// match: (Less32 x y)
+	// cond:
+	// result: (SETL (CMPL x y))
+	for {
+		x := v.Args[0]
+		y := v.Args[1]
+		v.reset(OpAMD64SETL)
+		v0 := b.NewValue0(v.Line, OpAMD64CMPL, TypeFlags)
+		v0.AddArg(x)
+		v0.AddArg(y)
+		v.AddArg(v0)
+		return true
+	}
+	return false
+}
+func rewriteValueAMD64_OpLess32F(v *Value, config *Config) bool {
+	b := v.Block
+	_ = b
+	// match: (Less32F x y)
+	// cond:
+	// result: (SETGF (UCOMISS y x))
+	for {
+		x := v.Args[0]
+		y := v.Args[1]
+		v.reset(OpAMD64SETGF)
+		v0 := b.NewValue0(v.Line, OpAMD64UCOMISS, TypeFlags)
+		v0.AddArg(y)
+		v0.AddArg(x)
+		v.AddArg(v0)
+		return true
+	}
+	return false
+}
+func rewriteValueAMD64_OpLess32U(v *Value, config *Config) bool {
+	b := v.Block
+	_ = b
+	// match: (Less32U x y)
+	// cond:
+	// result: (SETB (CMPL x y))
+	for {
+		x := v.Args[0]
+		y := v.Args[1]
+		v.reset(OpAMD64SETB)
+		v0 := b.NewValue0(v.Line, OpAMD64CMPL, TypeFlags)
+		v0.AddArg(x)
+		v0.AddArg(y)
+		v.AddArg(v0)
+		return true
+	}
+	return false
+}
+func rewriteValueAMD64_OpLess64(v *Value, config *Config) bool {
+	b := v.Block
+	_ = b
+	// match: (Less64 x y)
+	// cond:
+	// result: (SETL (CMPQ x y))
+	for {
+		x := v.Args[0]
+		y := v.Args[1]
+		v.reset(OpAMD64SETL)
+		v0 := b.NewValue0(v.Line, OpAMD64CMPQ, TypeFlags)
+		v0.AddArg(x)
+		v0.AddArg(y)
+		v.AddArg(v0)
+		return true
+	}
+	return false
+}
+func rewriteValueAMD64_OpLess64F(v *Value, config *Config) bool {
+	b := v.Block
+	_ = b
+	// match: (Less64F x y)
+	// cond:
+	// result: (SETGF (UCOMISD y x))
+	for {
+		x := v.Args[0]
+		y := v.Args[1]
+		v.reset(OpAMD64SETGF)
+		v0 := b.NewValue0(v.Line, OpAMD64UCOMISD, TypeFlags)
+		v0.AddArg(y)
+		v0.AddArg(x)
+		v.AddArg(v0)
+		return true
+	}
+	return false
+}
+func rewriteValueAMD64_OpLess64U(v *Value, config *Config) bool {
+	b := v.Block
+	_ = b
+	// match: (Less64U x y)
+	// cond:
+	// result: (SETB (CMPQ x y))
+	for {
+		x := v.Args[0]
+		y := v.Args[1]
+		v.reset(OpAMD64SETB)
+		v0 := b.NewValue0(v.Line, OpAMD64CMPQ, TypeFlags)
+		v0.AddArg(x)
+		v0.AddArg(y)
+		v.AddArg(v0)
+		return true
+	}
+	return false
+}
+func rewriteValueAMD64_OpLess8(v *Value, config *Config) bool {
+	b := v.Block
+	_ = b
+	// match: (Less8 x y)
+	// cond:
+	// result: (SETL (CMPB x y))
+	for {
+		x := v.Args[0]
+		y := v.Args[1]
+		v.reset(OpAMD64SETL)
+		v0 := b.NewValue0(v.Line, OpAMD64CMPB, TypeFlags)
+		v0.AddArg(x)
+		v0.AddArg(y)
+		v.AddArg(v0)
+		return true
+	}
+	return false
+}
+func rewriteValueAMD64_OpLess8U(v *Value, config *Config) bool {
+	b := v.Block
+	_ = b
+	// match: (Less8U x y)
+	// cond:
+	// result: (SETB (CMPB x y))
+	for {
+		x := v.Args[0]
+		y := v.Args[1]
+		v.reset(OpAMD64SETB)
+		v0 := b.NewValue0(v.Line, OpAMD64CMPB, TypeFlags)
+		v0.AddArg(x)
+		v0.AddArg(y)
+		v.AddArg(v0)
+		return true
+	}
+	return false
+}
+func rewriteValueAMD64_OpLoad(v *Value, config *Config) bool {
+	b := v.Block
+	_ = b
+	// match: (Load <t> ptr mem)
+	// cond: (is64BitInt(t) || isPtr(t))
+	// result: (MOVQload ptr mem)
+	for {
+		t := v.Type
+		ptr := v.Args[0]
+		mem := v.Args[1]
+		if !(is64BitInt(t) || isPtr(t)) {
+			break
+		}
+		v.reset(OpAMD64MOVQload)
+		v.AddArg(ptr)
+		v.AddArg(mem)
+		return true
+	}
+	// match: (Load <t> ptr mem)
+	// cond: is32BitInt(t)
+	// result: (MOVLload ptr mem)
+	for {
+		t := v.Type
+		ptr := v.Args[0]
+		mem := v.Args[1]
+		if !(is32BitInt(t)) {
+			break
+		}
+		v.reset(OpAMD64MOVLload)
+		v.AddArg(ptr)
+		v.AddArg(mem)
+		return true
+	}
+	// match: (Load <t> ptr mem)
+	// cond: is16BitInt(t)
+	// result: (MOVWload ptr mem)
+	for {
+		t := v.Type
+		ptr := v.Args[0]
+		mem := v.Args[1]
+		if !(is16BitInt(t)) {
+			break
+		}
+		v.reset(OpAMD64MOVWload)
+		v.AddArg(ptr)
+		v.AddArg(mem)
+		return true
+	}
+	// match: (Load <t> ptr mem)
+	// cond: (t.IsBoolean() || is8BitInt(t))
+	// result: (MOVBload ptr mem)
+	for {
+		t := v.Type
+		ptr := v.Args[0]
+		mem := v.Args[1]
+		if !(t.IsBoolean() || is8BitInt(t)) {
+			break
+		}
+		v.reset(OpAMD64MOVBload)
+		v.AddArg(ptr)
+		v.AddArg(mem)
+		return true
+	}
+	// match: (Load <t> ptr mem)
+	// cond: is32BitFloat(t)
+	// result: (MOVSSload ptr mem)
+	for {
+		t := v.Type
+		ptr := v.Args[0]
+		mem := v.Args[1]
+		if !(is32BitFloat(t)) {
+			break
+		}
+		v.reset(OpAMD64MOVSSload)
+		v.AddArg(ptr)
+		v.AddArg(mem)
+		return true
+	}
+	// match: (Load <t> ptr mem)
+	// cond: is64BitFloat(t)
+	// result: (MOVSDload ptr mem)
+	for {
+		t := v.Type
+		ptr := v.Args[0]
+		mem := v.Args[1]
+		if !(is64BitFloat(t)) {
+			break
+		}
+		v.reset(OpAMD64MOVSDload)
+		v.AddArg(ptr)
+		v.AddArg(mem)
+		return true
+	}
+	return false
+}
+func rewriteValueAMD64_OpLrot16(v *Value, config *Config) bool {
+	b := v.Block
+	_ = b
+	// match: (Lrot16 <t> x [c])
+	// cond:
+	// result: (ROLWconst <t> [c&15] x)
+	for {
+		t := v.Type
+		x := v.Args[0]
+		c := v.AuxInt
+		v.reset(OpAMD64ROLWconst)
+		v.Type = t
+		v.AuxInt = c & 15
+		v.AddArg(x)
+		return true
+	}
+	return false
+}
+func rewriteValueAMD64_OpLrot32(v *Value, config *Config) bool {
+	b := v.Block
+	_ = b
+	// match: (Lrot32 <t> x [c])
+	// cond:
+	// result: (ROLLconst <t> [c&31] x)
+	for {
+		t := v.Type
+		x := v.Args[0]
+		c := v.AuxInt
+		v.reset(OpAMD64ROLLconst)
+		v.Type = t
+		v.AuxInt = c & 31
+		v.AddArg(x)
+		return true
+	}
+	return false
+}
+func rewriteValueAMD64_OpLrot64(v *Value, config *Config) bool {
+	b := v.Block
+	_ = b
+	// match: (Lrot64 <t> x [c])
+	// cond:
+	// result: (ROLQconst <t> [c&63] x)
+	for {
+		t := v.Type
+		x := v.Args[0]
+		c := v.AuxInt
+		v.reset(OpAMD64ROLQconst)
+		v.Type = t
+		v.AuxInt = c & 63
+		v.AddArg(x)
+		return true
+	}
+	return false
+}
+func rewriteValueAMD64_OpLrot8(v *Value, config *Config) bool {
+	b := v.Block
+	_ = b
+	// match: (Lrot8 <t> x [c])
+	// cond:
+	// result: (ROLBconst <t> [c&7] x)
+	for {
+		t := v.Type
+		x := v.Args[0]
+		c := v.AuxInt
+		v.reset(OpAMD64ROLBconst)
+		v.Type = t
+		v.AuxInt = c & 7
+		v.AddArg(x)
+		return true
+	}
+	return false
+}
+func rewriteValueAMD64_OpLsh16x16(v *Value, config *Config) bool {
+	b := v.Block
+	_ = b
+	// match: (Lsh16x16 <t> x y)
+	// cond:
+	// result: (ANDW (SHLW <t> x y) (SBBLcarrymask <t> (CMPWconst y [16])))
+	for {
+		t := v.Type
+		x := v.Args[0]
+		y := v.Args[1]
+		v.reset(OpAMD64ANDW)
+		v0 := b.NewValue0(v.Line, OpAMD64SHLW, t)
+		v0.AddArg(x)
+		v0.AddArg(y)
+		v.AddArg(v0)
+		v1 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, t)
+		v2 := b.NewValue0(v.Line, OpAMD64CMPWconst, TypeFlags)
+		v2.AddArg(y)
+		v2.AuxInt = 16
+		v1.AddArg(v2)
+		v.AddArg(v1)
+		return true
+	}
+	return false
+}
+func rewriteValueAMD64_OpLsh16x32(v *Value, config *Config) bool {
+	b := v.Block
+	_ = b
+	// match: (Lsh16x32 <t> x y)
+	// cond:
+	// result: (ANDW (SHLW <t> x y) (SBBLcarrymask <t> (CMPLconst y [16])))
+	for {
+		t := v.Type
+		x := v.Args[0]
+		y := v.Args[1]
+		v.reset(OpAMD64ANDW)
+		v0 := b.NewValue0(v.Line, OpAMD64SHLW, t)
+		v0.AddArg(x)
+		v0.AddArg(y)
+		v.AddArg(v0)
+		v1 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, t)
+		v2 := b.NewValue0(v.Line, OpAMD64CMPLconst, TypeFlags)
+		v2.AddArg(y)
+		v2.AuxInt = 16
+		v1.AddArg(v2)
+		v.AddArg(v1)
+		return true
+	}
+	return false
+}
+func rewriteValueAMD64_OpLsh16x64(v *Value, config *Config) bool {
+	b := v.Block
+	_ = b
+	// match: (Lsh16x64 <t> x y)
+	// cond:
+	// result: (ANDW (SHLW <t> x y) (SBBLcarrymask <t> (CMPQconst y [16])))
+	for {
+		t := v.Type
+		x := v.Args[0]
+		y := v.Args[1]
+		v.reset(OpAMD64ANDW)
+		v0 := b.NewValue0(v.Line, OpAMD64SHLW, t)
+		v0.AddArg(x)
+		v0.AddArg(y)
+		v.AddArg(v0)
+		v1 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, t)
+		v2 := b.NewValue0(v.Line, OpAMD64CMPQconst, TypeFlags)
+		v2.AddArg(y)
+		v2.AuxInt = 16
+		v1.AddArg(v2)
+		v.AddArg(v1)
+		return true
+	}
+	return false
+}
+func rewriteValueAMD64_OpLsh16x8(v *Value, config *Config) bool {
+	b := v.Block
+	_ = b
+	// match: (Lsh16x8 <t> x y)
+	// cond:
+	// result: (ANDW (SHLW <t> x y) (SBBLcarrymask <t> (CMPBconst y [16])))
+	for {
+		t := v.Type
+		x := v.Args[0]
+		y := v.Args[1]
+		v.reset(OpAMD64ANDW)
+		v0 := b.NewValue0(v.Line, OpAMD64SHLW, t)
+		v0.AddArg(x)
+		v0.AddArg(y)
+		v.AddArg(v0)
+		v1 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, t)
+		v2 := b.NewValue0(v.Line, OpAMD64CMPBconst, TypeFlags)
+		v2.AddArg(y)
+		v2.AuxInt = 16
+		v1.AddArg(v2)
+		v.AddArg(v1)
+		return true
+	}
+	return false
+}
+func rewriteValueAMD64_OpLsh32x16(v *Value, config *Config) bool {
+	b := v.Block
+	_ = b
+	// match: (Lsh32x16 <t> x y)
+	// cond:
+	// result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPWconst y [32])))
+	for {
+		t := v.Type
+		x := v.Args[0]
+		y := v.Args[1]
+		v.reset(OpAMD64ANDL)
+		v0 := b.NewValue0(v.Line, OpAMD64SHLL, t)
+		v0.AddArg(x)
+		v0.AddArg(y)
+		v.AddArg(v0)
+		v1 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, t)
+		v2 := b.NewValue0(v.Line, OpAMD64CMPWconst, TypeFlags)
+		v2.AddArg(y)
+		v2.AuxInt = 32
+		v1.AddArg(v2)
+		v.AddArg(v1)
+		return true
+	}
+	return false
+}
+func rewriteValueAMD64_OpLsh32x32(v *Value, config *Config) bool {
+	b := v.Block
+	_ = b
+	// match: (Lsh32x32 <t> x y)
+	// cond:
+	// result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPLconst y [32])))
+	for {
+		t := v.Type
+		x := v.Args[0]
+		y := v.Args[1]
+		v.reset(OpAMD64ANDL)
+		v0 := b.NewValue0(v.Line, OpAMD64SHLL, t)
+		v0.AddArg(x)
+		v0.AddArg(y)
+		v.AddArg(v0)
+		v1 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, t)
+		v2 := b.NewValue0(v.Line, OpAMD64CMPLconst, TypeFlags)
+		v2.AddArg(y)
+		v2.AuxInt = 32
+		v1.AddArg(v2)
+		v.AddArg(v1)
+		return true
+	}
+	return false
+}
+func rewriteValueAMD64_OpLsh32x64(v *Value, config *Config) bool {
+	b := v.Block
+	_ = b
+	// match: (Lsh32x64 <t> x y)
+	// cond:
+	// result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPQconst y [32])))
+	for {
+		t := v.Type
+		x := v.Args[0]
+		y := v.Args[1]
+		v.reset(OpAMD64ANDL)
+		v0 := b.NewValue0(v.Line, OpAMD64SHLL, t)
+		v0.AddArg(x)
+		v0.AddArg(y)
+		v.AddArg(v0)
+		v1 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, t)
+		v2 := b.NewValue0(v.Line, OpAMD64CMPQconst, TypeFlags)
+		v2.AddArg(y)
+		v2.AuxInt = 32
+		v1.AddArg(v2)
+		v.AddArg(v1)
+		return true
+	}
+	return false
+}
+func rewriteValueAMD64_OpLsh32x8(v *Value, config *Config) bool {
+	b := v.Block
+	_ = b
+	// match: (Lsh32x8 <t> x y)
+	// cond:
+	// result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPBconst y [32])))
+	for {
+		t := v.Type
+		x := v.Args[0]
+		y := v.Args[1]
+		v.reset(OpAMD64ANDL)
+		v0 := b.NewValue0(v.Line, OpAMD64SHLL, t)
+		v0.AddArg(x)
+		v0.AddArg(y)
+		v.AddArg(v0)
+		v1 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, t)
+		v2 := b.NewValue0(v.Line, OpAMD64CMPBconst, TypeFlags)
+		v2.AddArg(y)
+		v2.AuxInt = 32
+		v1.AddArg(v2)
+		v.AddArg(v1)
+		return true
+	}
+	return false
+}
+func rewriteValueAMD64_OpLsh64x16(v *Value, config *Config) bool {
+	b := v.Block
+	_ = b
+	// match: (Lsh64x16 <t> x y)
+	// cond:
+	// result: (ANDQ (SHLQ <t> x y) (SBBQcarrymask <t> (CMPWconst y [64])))
+	for {
+		t := v.Type
+		x := v.Args[0]
+		y := v.Args[1]
+		v.reset(OpAMD64ANDQ)
+		v0 := b.NewValue0(v.Line, OpAMD64SHLQ, t)
+		v0.AddArg(x)
+		v0.AddArg(y)
+		v.AddArg(v0)
+		v1 := b.NewValue0(v.Line, OpAMD64SBBQcarrymask, t)
+		v2 := b.NewValue0(v.Line, OpAMD64CMPWconst, TypeFlags)
+		v2.AddArg(y)
+		v2.AuxInt = 64
+		v1.AddArg(v2)
+		v.AddArg(v1)
+		return true
+	}
+	return false
+}
+func rewriteValueAMD64_OpLsh64x32(v *Value, config *Config) bool {
+	b := v.Block
+	_ = b
+	// match: (Lsh64x32 <t> x y)
+	// cond:
+	// result: (ANDQ (SHLQ <t> x y) (SBBQcarrymask <t> (CMPLconst y [64])))
+	for {
+		t := v.Type
+		x := v.Args[0]
+		y := v.Args[1]
+		v.reset(OpAMD64ANDQ)
+		v0 := b.NewValue0(v.Line, OpAMD64SHLQ, t)
+		v0.AddArg(x)
+		v0.AddArg(y)
+		v.AddArg(v0)
+		v1 := b.NewValue0(v.Line, OpAMD64SBBQcarrymask, t)
+		v2 := b.NewValue0(v.Line, OpAMD64CMPLconst, TypeFlags)
+		v2.AddArg(y)
+		v2.AuxInt = 64
+		v1.AddArg(v2)
+		v.AddArg(v1)
+		return true
+	}
+	return false
+}
+func rewriteValueAMD64_OpLsh64x64(v *Value, config *Config) bool {
+	b := v.Block
+	_ = b
+	// match: (Lsh64x64 <t> x y)
+	// cond:
+	// result: (ANDQ (SHLQ <t> x y) (SBBQcarrymask <t> (CMPQconst y [64])))
+	for {
+		t := v.Type
+		x := v.Args[0]
+		y := v.Args[1]
+		v.reset(OpAMD64ANDQ)
+		v0 := b.NewValue0(v.Line, OpAMD64SHLQ, t)
+		v0.AddArg(x)
+		v0.AddArg(y)
+		v.AddArg(v0)
+		v1 := b.NewValue0(v.Line, OpAMD64SBBQcarrymask, t)
+		v2 := b.NewValue0(v.Line, OpAMD64CMPQconst, TypeFlags)
+		v2.AddArg(y)
+		v2.AuxInt = 64
+		v1.AddArg(v2)
+		v.AddArg(v1)
+		return true
+	}
+	return false
+}
+func rewriteValueAMD64_OpLsh64x8(v *Value, config *Config) bool {
+	b := v.Block
+	_ = b
+	// match: (Lsh64x8 <t> x y)
+	// cond:
+	// result: (ANDQ (SHLQ <t> x y) (SBBQcarrymask <t> (CMPBconst y [64])))
+	for {
+		t := v.Type
+		x := v.Args[0]
+		y := v.Args[1]
+		v.reset(OpAMD64ANDQ)
+		v0 := b.NewValue0(v.Line, OpAMD64SHLQ, t)
+		v0.AddArg(x)
+		v0.AddArg(y)
+		v.AddArg(v0)
+		v1 := b.NewValue0(v.Line, OpAMD64SBBQcarrymask, t)
+		v2 := b.NewValue0(v.Line, OpAMD64CMPBconst, TypeFlags)
+		v2.AddArg(y)
+		v2.AuxInt = 64
+		v1.AddArg(v2)
+		v.AddArg(v1)
+		return true
+	}
+	return false
+}
+func rewriteValueAMD64_OpLsh8x16(v *Value, config *Config) bool {
+	b := v.Block
+	_ = b
+	// match: (Lsh8x16 <t> x y)
+	// cond:
+	// result: (ANDB (SHLB <t> x y) (SBBLcarrymask <t> (CMPWconst y [8])))
+	for {
+		t := v.Type
+		x := v.Args[0]
+		y := v.Args[1]
+		v.reset(OpAMD64ANDB)
+		v0 := b.NewValue0(v.Line, OpAMD64SHLB, t)
+		v0.AddArg(x)
+		v0.AddArg(y)
+		v.AddArg(v0)
+		v1 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, t)
+		v2 := b.NewValue0(v.Line, OpAMD64CMPWconst, TypeFlags)
+		v2.AddArg(y)
+		v2.AuxInt = 8
+		v1.AddArg(v2)
+		v.AddArg(v1)
+		return true
+	}
+	return false
+}
+func rewriteValueAMD64_OpLsh8x32(v *Value, config *Config) bool {
+	b := v.Block
+	_ = b
+	// match: (Lsh8x32 <t> x y)
+	// cond:
+	// result: (ANDB (SHLB <t> x y) (SBBLcarrymask <t> (CMPLconst y [8])))
+	for {
+		t := v.Type
+		x := v.Args[0]
+		y := v.Args[1]
+		v.reset(OpAMD64ANDB)
+		v0 := b.NewValue0(v.Line, OpAMD64SHLB, t)
+		v0.AddArg(x)
+		v0.AddArg(y)
+		v.AddArg(v0)
+		v1 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, t)
+		v2 := b.NewValue0(v.Line, OpAMD64CMPLconst, TypeFlags)
+		v2.AddArg(y)
+		v2.AuxInt = 8
+		v1.AddArg(v2)
+		v.AddArg(v1)
+		return true
+	}
+	return false
+}
+func rewriteValueAMD64_OpLsh8x64(v *Value, config *Config) bool {
+	b := v.Block
+	_ = b
+	// match: (Lsh8x64 <t> x y)
+	// cond:
+	// result: (ANDB (SHLB <t> x y) (SBBLcarrymask <t> (CMPQconst y [8])))
+	for {
+		t := v.Type
+		x := v.Args[0]
+		y := v.Args[1]
+		v.reset(OpAMD64ANDB)
+		v0 := b.NewValue0(v.Line, OpAMD64SHLB, t)
+		v0.AddArg(x)
+		v0.AddArg(y)
+		v.AddArg(v0)
+		v1 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, t)
+		v2 := b.NewValue0(v.Line, OpAMD64CMPQconst, TypeFlags)
+		v2.AddArg(y)
+		v2.AuxInt = 8
+		v1.AddArg(v2)
+		v.AddArg(v1)
+		return true
+	}
+	return false
+}
+func rewriteValueAMD64_OpLsh8x8(v *Value, config *Config) bool {
+	b := v.Block
+	_ = b
+	// match: (Lsh8x8 <t> x y)
+	// cond:
+	// result: (ANDB (SHLB <t> x y) (SBBLcarrymask <t> (CMPBconst y [8])))
+	for {
+		t := v.Type
+		x := v.Args[0]
+		y := v.Args[1]
+		v.reset(OpAMD64ANDB)
+		v0 := b.NewValue0(v.Line, OpAMD64SHLB, t)
+		v0.AddArg(x)
+		v0.AddArg(y)
+		v.AddArg(v0)
+		v1 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, t)
+		v2 := b.NewValue0(v.Line, OpAMD64CMPBconst, TypeFlags)
+		v2.AddArg(y)
+		v2.AuxInt = 8
+		v1.AddArg(v2)
+		v.AddArg(v1)
+		return true
+	}
+	return false
+}
+func rewriteValueAMD64_OpAMD64MOVBQSX(v *Value, config *Config) bool {
+	b := v.Block
+	_ = b
+	// match: (MOVBQSX (MOVBload [off] {sym} ptr mem))
+	// cond:
+	// result: @v.Args[0].Block (MOVBQSXload <v.Type> [off] {sym} ptr mem)
+	for {
+		if v.Args[0].Op != OpAMD64MOVBload {
+			break
+		}
+		off := v.Args[0].AuxInt
+		sym := v.Args[0].Aux
v.Args[0].Args[0] + mem := v.Args[0].Args[1] + b = v.Args[0].Block + v0 := b.NewValue0(v.Line, OpAMD64MOVBQSXload, v.Type) + v.reset(OpCopy) + v.AddArg(v0) + v0.AuxInt = off + v0.Aux = sym + v0.AddArg(ptr) + v0.AddArg(mem) + return true + } + // match: (MOVBQSX (ANDBconst [c] x)) + // cond: c & 0x80 == 0 + // result: (ANDQconst [c & 0x7f] x) + for { + if v.Args[0].Op != OpAMD64ANDBconst { + break + } + c := v.Args[0].AuxInt + x := v.Args[0].Args[0] + if !(c&0x80 == 0) { + break + } + v.reset(OpAMD64ANDQconst) + v.AuxInt = c & 0x7f + v.AddArg(x) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64MOVBQZX(v *Value, config *Config) bool { + b := v.Block + _ = b + // match: (MOVBQZX (MOVBload [off] {sym} ptr mem)) + // cond: + // result: @v.Args[0].Block (MOVBQZXload <v.Type> [off] {sym} ptr mem) + for { + if v.Args[0].Op != OpAMD64MOVBload { + break + } + off := v.Args[0].AuxInt + sym := v.Args[0].Aux + ptr := v.Args[0].Args[0] + mem := v.Args[0].Args[1] + b = v.Args[0].Block + v0 := b.NewValue0(v.Line, OpAMD64MOVBQZXload, v.Type) + v.reset(OpCopy) + v.AddArg(v0) + v0.AuxInt = off + v0.Aux = sym + v0.AddArg(ptr) + v0.AddArg(mem) + return true + } + // match: (MOVBQZX (ANDBconst [c] x)) + // cond: + // result: (ANDQconst [c & 0xff] x) + for { + if v.Args[0].Op != OpAMD64ANDBconst { + break + } + c := v.Args[0].AuxInt + x := v.Args[0].Args[0] + v.reset(OpAMD64ANDQconst) + v.AuxInt = c & 0xff + v.AddArg(x) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64MOVBload(v *Value, config *Config) bool { + b := v.Block + _ = b + // match: (MOVBload [off] {sym} ptr (MOVBstore [off2] {sym2} ptr2 x _)) + // cond: sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) + // result: x + for { + off := v.AuxInt + sym := v.Aux + ptr := v.Args[0] + if v.Args[1].Op != OpAMD64MOVBstore { + break + } + off2 := v.Args[1].AuxInt + sym2 := v.Args[1].Aux + ptr2 := v.Args[1].Args[0] + x := v.Args[1].Args[1] + if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) { + break + } + v.reset(OpCopy) + v.Type = x.Type + v.AddArg(x) + return true + } + // match: (MOVBload [off1] {sym} (ADDQconst [off2] ptr) mem) + // cond: + // result: (MOVBload [addOff(off1, off2)] {sym} ptr mem) + for { + off1 := v.AuxInt + sym := v.Aux + if v.Args[0].Op != OpAMD64ADDQconst { + break + } + off2 := v.Args[0].AuxInt + ptr := v.Args[0].Args[0] + mem := v.Args[1] + v.reset(OpAMD64MOVBload) + v.AuxInt = addOff(off1, off2) + v.Aux = sym + v.AddArg(ptr) + v.AddArg(mem) + return true + } + // match: (MOVBload [off1] {sym1} (LEAQ [off2] {sym2} base) mem) + // cond: canMergeSym(sym1, sym2) + // result: (MOVBload [addOff(off1,off2)] {mergeSym(sym1,sym2)} base mem) + for { + off1 := v.AuxInt + sym1 := v.Aux + if v.Args[0].Op != OpAMD64LEAQ { + break + } + off2 := v.Args[0].AuxInt + sym2 := v.Args[0].Aux + base := v.Args[0].Args[0] + mem := v.Args[1] + if !(canMergeSym(sym1, sym2)) { + break + } + v.reset(OpAMD64MOVBload) + v.AuxInt = addOff(off1, off2) + v.Aux = mergeSym(sym1, sym2) + v.AddArg(base) + v.AddArg(mem) + return true + } + // match: (MOVBload [off1] {sym1} (LEAQ1 [off2] {sym2} ptr idx) mem) + // cond: canMergeSym(sym1, sym2) + // result: (MOVBloadidx1 [addOff(off1, off2)] {mergeSym(sym1,sym2)} ptr idx mem) + for { + off1 := v.AuxInt + sym1 := v.Aux + if v.Args[0].Op != OpAMD64LEAQ1 { + break + } + off2 := v.Args[0].AuxInt + sym2 := v.Args[0].Aux + ptr := v.Args[0].Args[0] + idx := v.Args[0].Args[1] + mem := v.Args[1] + if !(canMergeSym(sym1, sym2)) { + break + } + v.reset(OpAMD64MOVBloadidx1) + v.AuxInt = 
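// Editorial comment: the first MOVBload rule above is store-to-load
// forwarding. A load at [off] {sym} ptr whose memory argument is a store
// to the provably same address (sym == sym2, off == off2, isSamePtr) can
// reuse the stored value x directly, so the load is rewritten to a Copy
// of x and never touches memory.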
addOff(off1, off2) + v.Aux = mergeSym(sym1, sym2) + v.AddArg(ptr) + v.AddArg(idx) + v.AddArg(mem) + return true + } + // match: (MOVBload [off] {sym} (ADDQ ptr idx) mem) + // cond: ptr.Op != OpSB + // result: (MOVBloadidx1 [off] {sym} ptr idx mem) + for { + off := v.AuxInt + sym := v.Aux + if v.Args[0].Op != OpAMD64ADDQ { + break + } + ptr := v.Args[0].Args[0] + idx := v.Args[0].Args[1] + mem := v.Args[1] + if !(ptr.Op != OpSB) { + break + } + v.reset(OpAMD64MOVBloadidx1) + v.AuxInt = off + v.Aux = sym + v.AddArg(ptr) + v.AddArg(idx) + v.AddArg(mem) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64MOVBloadidx1(v *Value, config *Config) bool { + b := v.Block + _ = b + // match: (MOVBloadidx1 [c] {sym} (ADDQconst [d] ptr) idx mem) + // cond: + // result: (MOVBloadidx1 [c+d] {sym} ptr idx mem) + for { + c := v.AuxInt + sym := v.Aux + if v.Args[0].Op != OpAMD64ADDQconst { + break + } + d := v.Args[0].AuxInt + ptr := v.Args[0].Args[0] + idx := v.Args[1] + mem := v.Args[2] + v.reset(OpAMD64MOVBloadidx1) + v.AuxInt = c + d + v.Aux = sym + v.AddArg(ptr) + v.AddArg(idx) + v.AddArg(mem) + return true + } + // match: (MOVBloadidx1 [c] {sym} ptr (ADDQconst [d] idx) mem) + // cond: + // result: (MOVBloadidx1 [c+d] {sym} ptr idx mem) + for { + c := v.AuxInt + sym := v.Aux + ptr := v.Args[0] + if v.Args[1].Op != OpAMD64ADDQconst { + break + } + d := v.Args[1].AuxInt + idx := v.Args[1].Args[0] + mem := v.Args[2] + v.reset(OpAMD64MOVBloadidx1) + v.AuxInt = c + d + v.Aux = sym + v.AddArg(ptr) + v.AddArg(idx) + v.AddArg(mem) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64MOVBstore(v *Value, config *Config) bool { + b := v.Block + _ = b + // match: (MOVBstore [off] {sym} ptr (MOVBQSX x) mem) + // cond: + // result: (MOVBstore [off] {sym} ptr x mem) + for { + off := v.AuxInt + sym := v.Aux + ptr := v.Args[0] + if v.Args[1].Op != OpAMD64MOVBQSX { + break + } + x := v.Args[1].Args[0] + mem := v.Args[2] + v.reset(OpAMD64MOVBstore) + v.AuxInt = off + v.Aux = sym + v.AddArg(ptr) + v.AddArg(x) + v.AddArg(mem) + return true + } + // match: (MOVBstore [off] {sym} ptr (MOVBQZX x) mem) + // cond: + // result: (MOVBstore [off] {sym} ptr x mem) + for { + off := v.AuxInt + sym := v.Aux + ptr := v.Args[0] + if v.Args[1].Op != OpAMD64MOVBQZX { + break + } + x := v.Args[1].Args[0] + mem := v.Args[2] + v.reset(OpAMD64MOVBstore) + v.AuxInt = off + v.Aux = sym + v.AddArg(ptr) + v.AddArg(x) + v.AddArg(mem) + return true + } + // match: (MOVBstore [off1] {sym} (ADDQconst [off2] ptr) val mem) + // cond: + // result: (MOVBstore [addOff(off1, off2)] {sym} ptr val mem) + for { + off1 := v.AuxInt + sym := v.Aux + if v.Args[0].Op != OpAMD64ADDQconst { + break + } + off2 := v.Args[0].AuxInt + ptr := v.Args[0].Args[0] + val := v.Args[1] + mem := v.Args[2] + v.reset(OpAMD64MOVBstore) + v.AuxInt = addOff(off1, off2) + v.Aux = sym + v.AddArg(ptr) + v.AddArg(val) + v.AddArg(mem) + return true + } + // match: (MOVBstore [off] {sym} ptr (MOVBconst [c]) mem) + // cond: validOff(off) + // result: (MOVBstoreconst [makeValAndOff(int64(int8(c)),off)] {sym} ptr mem) + for { + off := v.AuxInt + sym := v.Aux + ptr := v.Args[0] + if v.Args[1].Op != OpAMD64MOVBconst { + break + } + c := v.Args[1].AuxInt + mem := v.Args[2] + if !(validOff(off)) { + break + } + v.reset(OpAMD64MOVBstoreconst) + v.AuxInt = makeValAndOff(int64(int8(c)), off) + v.Aux = sym + v.AddArg(ptr) + v.AddArg(mem) + return true + } + // match: (MOVBstore [off1] {sym1} (LEAQ [off2] {sym2} base) val mem) + // cond: canMergeSym(sym1, sym2) + // result: 
(MOVBstore [addOff(off1,off2)] {mergeSym(sym1,sym2)} base val mem) + for { + off1 := v.AuxInt + sym1 := v.Aux + if v.Args[0].Op != OpAMD64LEAQ { + break + } + off2 := v.Args[0].AuxInt + sym2 := v.Args[0].Aux + base := v.Args[0].Args[0] + val := v.Args[1] + mem := v.Args[2] + if !(canMergeSym(sym1, sym2)) { + break + } + v.reset(OpAMD64MOVBstore) + v.AuxInt = addOff(off1, off2) + v.Aux = mergeSym(sym1, sym2) + v.AddArg(base) + v.AddArg(val) + v.AddArg(mem) + return true + } + // match: (MOVBstore [off1] {sym1} (LEAQ1 [off2] {sym2} ptr idx) val mem) + // cond: canMergeSym(sym1, sym2) + // result: (MOVBstoreidx1 [addOff(off1, off2)] {mergeSym(sym1,sym2)} ptr idx val mem) + for { + off1 := v.AuxInt + sym1 := v.Aux + if v.Args[0].Op != OpAMD64LEAQ1 { + break + } + off2 := v.Args[0].AuxInt + sym2 := v.Args[0].Aux + ptr := v.Args[0].Args[0] + idx := v.Args[0].Args[1] + val := v.Args[1] + mem := v.Args[2] + if !(canMergeSym(sym1, sym2)) { + break + } + v.reset(OpAMD64MOVBstoreidx1) + v.AuxInt = addOff(off1, off2) + v.Aux = mergeSym(sym1, sym2) + v.AddArg(ptr) + v.AddArg(idx) + v.AddArg(val) + v.AddArg(mem) + return true + } + // match: (MOVBstore [off] {sym} (ADDQ ptr idx) val mem) + // cond: ptr.Op != OpSB + // result: (MOVBstoreidx1 [off] {sym} ptr idx val mem) + for { + off := v.AuxInt + sym := v.Aux + if v.Args[0].Op != OpAMD64ADDQ { + break + } + ptr := v.Args[0].Args[0] + idx := v.Args[0].Args[1] + val := v.Args[1] + mem := v.Args[2] + if !(ptr.Op != OpSB) { + break + } + v.reset(OpAMD64MOVBstoreidx1) + v.AuxInt = off + v.Aux = sym + v.AddArg(ptr) + v.AddArg(idx) + v.AddArg(val) + v.AddArg(mem) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64MOVBstoreconst(v *Value, config *Config) bool { + b := v.Block + _ = b + // match: (MOVBstoreconst [sc] {s} (ADDQconst [off] ptr) mem) + // cond: ValAndOff(sc).canAdd(off) + // result: (MOVBstoreconst [ValAndOff(sc).add(off)] {s} ptr mem) + for { + sc := v.AuxInt + s := v.Aux + if v.Args[0].Op != OpAMD64ADDQconst { + break + } + off := v.Args[0].AuxInt + ptr := v.Args[0].Args[0] + mem := v.Args[1] + if !(ValAndOff(sc).canAdd(off)) { + break + } + v.reset(OpAMD64MOVBstoreconst) + v.AuxInt = ValAndOff(sc).add(off) + v.Aux = s + v.AddArg(ptr) + v.AddArg(mem) + return true + } + // match: (MOVBstoreconst [sc] {sym1} (LEAQ [off] {sym2} ptr) mem) + // cond: canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off) + // result: (MOVBstoreconst [ValAndOff(sc).add(off)] {mergeSym(sym1, sym2)} ptr mem) + for { + sc := v.AuxInt + sym1 := v.Aux + if v.Args[0].Op != OpAMD64LEAQ { + break + } + off := v.Args[0].AuxInt + sym2 := v.Args[0].Aux + ptr := v.Args[0].Args[0] + mem := v.Args[1] + if !(canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off)) { + break + } + v.reset(OpAMD64MOVBstoreconst) + v.AuxInt = ValAndOff(sc).add(off) + v.Aux = mergeSym(sym1, sym2) + v.AddArg(ptr) + v.AddArg(mem) + return true + } + // match: (MOVBstoreconst [x] {sym1} (LEAQ1 [off] {sym2} ptr idx) mem) + // cond: canMergeSym(sym1, sym2) + // result: (MOVBstoreconstidx1 [ValAndOff(x).add(off)] {mergeSym(sym1,sym2)} ptr idx mem) + for { + x := v.AuxInt + sym1 := v.Aux + if v.Args[0].Op != OpAMD64LEAQ1 { + break + } + off := v.Args[0].AuxInt + sym2 := v.Args[0].Aux + ptr := v.Args[0].Args[0] + idx := v.Args[0].Args[1] + mem := v.Args[1] + if !(canMergeSym(sym1, sym2)) { + break + } + v.reset(OpAMD64MOVBstoreconstidx1) + v.AuxInt = ValAndOff(x).add(off) + v.Aux = mergeSym(sym1, sym2) + v.AddArg(ptr) + v.AddArg(idx) + v.AddArg(mem) + return true + } + // match: (MOVBstoreconst [x] 
{sym} (ADDQ ptr idx) mem) + // cond: + // result: (MOVBstoreconstidx1 [x] {sym} ptr idx mem) + for { + x := v.AuxInt + sym := v.Aux + if v.Args[0].Op != OpAMD64ADDQ { + break + } + ptr := v.Args[0].Args[0] + idx := v.Args[0].Args[1] + mem := v.Args[1] + v.reset(OpAMD64MOVBstoreconstidx1) + v.AuxInt = x + v.Aux = sym + v.AddArg(ptr) + v.AddArg(idx) + v.AddArg(mem) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64MOVBstoreconstidx1(v *Value, config *Config) bool { + b := v.Block + _ = b + // match: (MOVBstoreconstidx1 [x] {sym} (ADDQconst [c] ptr) idx mem) + // cond: + // result: (MOVBstoreconstidx1 [ValAndOff(x).add(c)] {sym} ptr idx mem) + for { + x := v.AuxInt + sym := v.Aux + if v.Args[0].Op != OpAMD64ADDQconst { + break + } + c := v.Args[0].AuxInt + ptr := v.Args[0].Args[0] + idx := v.Args[1] + mem := v.Args[2] + v.reset(OpAMD64MOVBstoreconstidx1) + v.AuxInt = ValAndOff(x).add(c) + v.Aux = sym + v.AddArg(ptr) + v.AddArg(idx) + v.AddArg(mem) + return true + } + // match: (MOVBstoreconstidx1 [x] {sym} ptr (ADDQconst [c] idx) mem) + // cond: + // result: (MOVBstoreconstidx1 [ValAndOff(x).add(c)] {sym} ptr idx mem) + for { + x := v.AuxInt + sym := v.Aux + ptr := v.Args[0] + if v.Args[1].Op != OpAMD64ADDQconst { + break + } + c := v.Args[1].AuxInt + idx := v.Args[1].Args[0] + mem := v.Args[2] + v.reset(OpAMD64MOVBstoreconstidx1) + v.AuxInt = ValAndOff(x).add(c) + v.Aux = sym + v.AddArg(ptr) + v.AddArg(idx) + v.AddArg(mem) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64MOVBstoreidx1(v *Value, config *Config) bool { + b := v.Block + _ = b + // match: (MOVBstoreidx1 [c] {sym} (ADDQconst [d] ptr) idx val mem) + // cond: + // result: (MOVBstoreidx1 [c+d] {sym} ptr idx val mem) + for { + c := v.AuxInt + sym := v.Aux + if v.Args[0].Op != OpAMD64ADDQconst { + break + } + d := v.Args[0].AuxInt + ptr := v.Args[0].Args[0] + idx := v.Args[1] + val := v.Args[2] + mem := v.Args[3] + v.reset(OpAMD64MOVBstoreidx1) + v.AuxInt = c + d + v.Aux = sym + v.AddArg(ptr) + v.AddArg(idx) + v.AddArg(val) + v.AddArg(mem) + return true + } + // match: (MOVBstoreidx1 [c] {sym} ptr (ADDQconst [d] idx) val mem) + // cond: + // result: (MOVBstoreidx1 [c+d] {sym} ptr idx val mem) + for { + c := v.AuxInt + sym := v.Aux + ptr := v.Args[0] + if v.Args[1].Op != OpAMD64ADDQconst { + break + } + d := v.Args[1].AuxInt + idx := v.Args[1].Args[0] + val := v.Args[2] + mem := v.Args[3] + v.reset(OpAMD64MOVBstoreidx1) + v.AuxInt = c + d + v.Aux = sym + v.AddArg(ptr) + v.AddArg(idx) + v.AddArg(val) + v.AddArg(mem) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64MOVLQSX(v *Value, config *Config) bool { + b := v.Block + _ = b + // match: (MOVLQSX (MOVLload [off] {sym} ptr mem)) + // cond: + // result: @v.Args[0].Block (MOVLQSXload <v.Type> [off] {sym} ptr mem) + for { + if v.Args[0].Op != OpAMD64MOVLload { + break + } + off := v.Args[0].AuxInt + sym := v.Args[0].Aux + ptr := v.Args[0].Args[0] + mem := v.Args[0].Args[1] + b = v.Args[0].Block + v0 := b.NewValue0(v.Line, OpAMD64MOVLQSXload, v.Type) + v.reset(OpCopy) + v.AddArg(v0) + v0.AuxInt = off + v0.Aux = sym + v0.AddArg(ptr) + v0.AddArg(mem) + return true + } + // match: (MOVLQSX (ANDLconst [c] x)) + // cond: c & 0x80000000 == 0 + // result: (ANDQconst [c & 0x7fffffff] x) + for { + if v.Args[0].Op != OpAMD64ANDLconst { + break + } + c := v.Args[0].AuxInt + x := v.Args[0].Args[0] + if !(c&0x80000000 == 0) { + break + } + v.reset(OpAMD64ANDQconst) + v.AuxInt = c & 0x7fffffff + v.AddArg(x) + return true + } + return false +} +func 
rewriteValueAMD64_OpAMD64MOVLQZX(v *Value, config *Config) bool { + b := v.Block + _ = b + // match: (MOVLQZX (MOVLload [off] {sym} ptr mem)) + // cond: + // result: @v.Args[0].Block (MOVLQZXload <v.Type> [off] {sym} ptr mem) + for { + if v.Args[0].Op != OpAMD64MOVLload { + break + } + off := v.Args[0].AuxInt + sym := v.Args[0].Aux + ptr := v.Args[0].Args[0] + mem := v.Args[0].Args[1] + b = v.Args[0].Block + v0 := b.NewValue0(v.Line, OpAMD64MOVLQZXload, v.Type) + v.reset(OpCopy) + v.AddArg(v0) + v0.AuxInt = off + v0.Aux = sym + v0.AddArg(ptr) + v0.AddArg(mem) + return true + } + // match: (MOVLQZX (ANDLconst [c] x)) + // cond: + // result: (ANDQconst [c & 0xffffffff] x) + for { + if v.Args[0].Op != OpAMD64ANDLconst { + break + } + c := v.Args[0].AuxInt + x := v.Args[0].Args[0] + v.reset(OpAMD64ANDQconst) + v.AuxInt = c & 0xffffffff + v.AddArg(x) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64MOVLload(v *Value, config *Config) bool { + b := v.Block + _ = b + // match: (MOVLload [off] {sym} ptr (MOVLstore [off2] {sym2} ptr2 x _)) + // cond: sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) + // result: x + for { + off := v.AuxInt + sym := v.Aux + ptr := v.Args[0] + if v.Args[1].Op != OpAMD64MOVLstore { + break + } + off2 := v.Args[1].AuxInt + sym2 := v.Args[1].Aux + ptr2 := v.Args[1].Args[0] + x := v.Args[1].Args[1] + if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) { + break + } + v.reset(OpCopy) + v.Type = x.Type + v.AddArg(x) + return true + } + // match: (MOVLload [off1] {sym} (ADDQconst [off2] ptr) mem) + // cond: + // result: (MOVLload [addOff(off1, off2)] {sym} ptr mem) + for { + off1 := v.AuxInt + sym := v.Aux + if v.Args[0].Op != OpAMD64ADDQconst { + break + } + off2 := v.Args[0].AuxInt + ptr := v.Args[0].Args[0] + mem := v.Args[1] + v.reset(OpAMD64MOVLload) + v.AuxInt = addOff(off1, off2) + v.Aux = sym + v.AddArg(ptr) + v.AddArg(mem) + return true + } + // match: (MOVLload [off1] {sym1} (LEAQ [off2] {sym2} base) mem) + // cond: canMergeSym(sym1, sym2) + // result: (MOVLload [addOff(off1,off2)] {mergeSym(sym1,sym2)} base mem) + for { + off1 := v.AuxInt + sym1 := v.Aux + if v.Args[0].Op != OpAMD64LEAQ { + break + } + off2 := v.Args[0].AuxInt + sym2 := v.Args[0].Aux + base := v.Args[0].Args[0] + mem := v.Args[1] + if !(canMergeSym(sym1, sym2)) { + break + } + v.reset(OpAMD64MOVLload) + v.AuxInt = addOff(off1, off2) + v.Aux = mergeSym(sym1, sym2) + v.AddArg(base) + v.AddArg(mem) + return true + } + // match: (MOVLload [off1] {sym1} (LEAQ4 [off2] {sym2} ptr idx) mem) + // cond: canMergeSym(sym1, sym2) + // result: (MOVLloadidx4 [addOff(off1, off2)] {mergeSym(sym1,sym2)} ptr idx mem) + for { + off1 := v.AuxInt + sym1 := v.Aux + if v.Args[0].Op != OpAMD64LEAQ4 { + break + } + off2 := v.Args[0].AuxInt + sym2 := v.Args[0].Aux + ptr := v.Args[0].Args[0] + idx := v.Args[0].Args[1] + mem := v.Args[1] + if !(canMergeSym(sym1, sym2)) { + break + } + v.reset(OpAMD64MOVLloadidx4) + v.AuxInt = addOff(off1, off2) + v.Aux = mergeSym(sym1, sym2) + v.AddArg(ptr) + v.AddArg(idx) + v.AddArg(mem) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64MOVLloadidx4(v *Value, config *Config) bool { + b := v.Block + _ = b + // match: (MOVLloadidx4 [c] {sym} (ADDQconst [d] ptr) idx mem) + // cond: + // result: (MOVLloadidx4 [c+d] {sym} ptr idx mem) + for { + c := v.AuxInt + sym := v.Aux + if v.Args[0].Op != OpAMD64ADDQconst { + break + } + d := v.Args[0].AuxInt + ptr := v.Args[0].Args[0] + idx := v.Args[1] + mem := v.Args[2] + v.reset(OpAMD64MOVLloadidx4) + v.AuxInt = c 
+ d + v.Aux = sym + v.AddArg(ptr) + v.AddArg(idx) + v.AddArg(mem) + return true + } + // match: (MOVLloadidx4 [c] {sym} ptr (ADDQconst [d] idx) mem) + // cond: + // result: (MOVLloadidx4 [c+4*d] {sym} ptr idx mem) + for { + c := v.AuxInt + sym := v.Aux + ptr := v.Args[0] + if v.Args[1].Op != OpAMD64ADDQconst { + break + } + d := v.Args[1].AuxInt + idx := v.Args[1].Args[0] + mem := v.Args[2] + v.reset(OpAMD64MOVLloadidx4) + v.AuxInt = c + 4*d + v.Aux = sym + v.AddArg(ptr) + v.AddArg(idx) + v.AddArg(mem) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64MOVLstore(v *Value, config *Config) bool { + b := v.Block + _ = b + // match: (MOVLstore [off] {sym} ptr (MOVLQSX x) mem) + // cond: + // result: (MOVLstore [off] {sym} ptr x mem) + for { + off := v.AuxInt + sym := v.Aux + ptr := v.Args[0] + if v.Args[1].Op != OpAMD64MOVLQSX { + break + } + x := v.Args[1].Args[0] + mem := v.Args[2] + v.reset(OpAMD64MOVLstore) + v.AuxInt = off + v.Aux = sym + v.AddArg(ptr) + v.AddArg(x) + v.AddArg(mem) + return true + } + // match: (MOVLstore [off] {sym} ptr (MOVLQZX x) mem) + // cond: + // result: (MOVLstore [off] {sym} ptr x mem) + for { + off := v.AuxInt + sym := v.Aux + ptr := v.Args[0] + if v.Args[1].Op != OpAMD64MOVLQZX { + break + } + x := v.Args[1].Args[0] + mem := v.Args[2] + v.reset(OpAMD64MOVLstore) + v.AuxInt = off + v.Aux = sym + v.AddArg(ptr) + v.AddArg(x) + v.AddArg(mem) + return true + } + // match: (MOVLstore [off1] {sym} (ADDQconst [off2] ptr) val mem) + // cond: + // result: (MOVLstore [addOff(off1, off2)] {sym} ptr val mem) + for { + off1 := v.AuxInt + sym := v.Aux + if v.Args[0].Op != OpAMD64ADDQconst { + break + } + off2 := v.Args[0].AuxInt + ptr := v.Args[0].Args[0] + val := v.Args[1] + mem := v.Args[2] + v.reset(OpAMD64MOVLstore) + v.AuxInt = addOff(off1, off2) + v.Aux = sym + v.AddArg(ptr) + v.AddArg(val) + v.AddArg(mem) + return true + } + // match: (MOVLstore [off] {sym} ptr (MOVLconst [c]) mem) + // cond: validOff(off) + // result: (MOVLstoreconst [makeValAndOff(int64(int32(c)),off)] {sym} ptr mem) + for { + off := v.AuxInt + sym := v.Aux + ptr := v.Args[0] + if v.Args[1].Op != OpAMD64MOVLconst { + break + } + c := v.Args[1].AuxInt + mem := v.Args[2] + if !(validOff(off)) { + break + } + v.reset(OpAMD64MOVLstoreconst) + v.AuxInt = makeValAndOff(int64(int32(c)), off) + v.Aux = sym + v.AddArg(ptr) + v.AddArg(mem) + return true + } + // match: (MOVLstore [off1] {sym1} (LEAQ [off2] {sym2} base) val mem) + // cond: canMergeSym(sym1, sym2) + // result: (MOVLstore [addOff(off1,off2)] {mergeSym(sym1,sym2)} base val mem) + for { + off1 := v.AuxInt + sym1 := v.Aux + if v.Args[0].Op != OpAMD64LEAQ { + break + } + off2 := v.Args[0].AuxInt + sym2 := v.Args[0].Aux + base := v.Args[0].Args[0] + val := v.Args[1] + mem := v.Args[2] + if !(canMergeSym(sym1, sym2)) { + break + } + v.reset(OpAMD64MOVLstore) + v.AuxInt = addOff(off1, off2) + v.Aux = mergeSym(sym1, sym2) + v.AddArg(base) + v.AddArg(val) + v.AddArg(mem) + return true + } + // match: (MOVLstore [off1] {sym1} (LEAQ4 [off2] {sym2} ptr idx) val mem) + // cond: canMergeSym(sym1, sym2) + // result: (MOVLstoreidx4 [addOff(off1, off2)] {mergeSym(sym1,sym2)} ptr idx val mem) + for { + off1 := v.AuxInt + sym1 := v.Aux + if v.Args[0].Op != OpAMD64LEAQ4 { + break + } + off2 := v.Args[0].AuxInt + sym2 := v.Args[0].Aux + ptr := v.Args[0].Args[0] + idx := v.Args[0].Args[1] + val := v.Args[1] + mem := v.Args[2] + if !(canMergeSym(sym1, sym2)) { + break + } + v.reset(OpAMD64MOVLstoreidx4) + v.AuxInt = addOff(off1, off2) + v.Aux = 
mergeSym(sym1, sym2) + v.AddArg(ptr) + v.AddArg(idx) + v.AddArg(val) + v.AddArg(mem) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64MOVLstoreconst(v *Value, config *Config) bool { + b := v.Block + _ = b + // match: (MOVLstoreconst [sc] {s} (ADDQconst [off] ptr) mem) + // cond: ValAndOff(sc).canAdd(off) + // result: (MOVLstoreconst [ValAndOff(sc).add(off)] {s} ptr mem) + for { + sc := v.AuxInt + s := v.Aux + if v.Args[0].Op != OpAMD64ADDQconst { + break + } + off := v.Args[0].AuxInt + ptr := v.Args[0].Args[0] + mem := v.Args[1] + if !(ValAndOff(sc).canAdd(off)) { + break + } + v.reset(OpAMD64MOVLstoreconst) + v.AuxInt = ValAndOff(sc).add(off) + v.Aux = s + v.AddArg(ptr) + v.AddArg(mem) + return true + } + // match: (MOVLstoreconst [sc] {sym1} (LEAQ [off] {sym2} ptr) mem) + // cond: canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off) + // result: (MOVLstoreconst [ValAndOff(sc).add(off)] {mergeSym(sym1, sym2)} ptr mem) + for { + sc := v.AuxInt + sym1 := v.Aux + if v.Args[0].Op != OpAMD64LEAQ { + break + } + off := v.Args[0].AuxInt + sym2 := v.Args[0].Aux + ptr := v.Args[0].Args[0] + mem := v.Args[1] + if !(canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off)) { + break + } + v.reset(OpAMD64MOVLstoreconst) + v.AuxInt = ValAndOff(sc).add(off) + v.Aux = mergeSym(sym1, sym2) + v.AddArg(ptr) + v.AddArg(mem) + return true + } + // match: (MOVLstoreconst [x] {sym1} (LEAQ4 [off] {sym2} ptr idx) mem) + // cond: canMergeSym(sym1, sym2) + // result: (MOVLstoreconstidx4 [ValAndOff(x).add(off)] {mergeSym(sym1,sym2)} ptr idx mem) + for { + x := v.AuxInt + sym1 := v.Aux + if v.Args[0].Op != OpAMD64LEAQ4 { + break + } + off := v.Args[0].AuxInt + sym2 := v.Args[0].Aux + ptr := v.Args[0].Args[0] + idx := v.Args[0].Args[1] + mem := v.Args[1] + if !(canMergeSym(sym1, sym2)) { + break + } + v.reset(OpAMD64MOVLstoreconstidx4) + v.AuxInt = ValAndOff(x).add(off) + v.Aux = mergeSym(sym1, sym2) + v.AddArg(ptr) + v.AddArg(idx) + v.AddArg(mem) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64MOVLstoreconstidx4(v *Value, config *Config) bool { + b := v.Block + _ = b + // match: (MOVLstoreconstidx4 [x] {sym} (ADDQconst [c] ptr) idx mem) + // cond: + // result: (MOVLstoreconstidx4 [ValAndOff(x).add(c)] {sym} ptr idx mem) + for { + x := v.AuxInt + sym := v.Aux + if v.Args[0].Op != OpAMD64ADDQconst { + break + } + c := v.Args[0].AuxInt + ptr := v.Args[0].Args[0] + idx := v.Args[1] + mem := v.Args[2] + v.reset(OpAMD64MOVLstoreconstidx4) + v.AuxInt = ValAndOff(x).add(c) + v.Aux = sym + v.AddArg(ptr) + v.AddArg(idx) + v.AddArg(mem) + return true + } + // match: (MOVLstoreconstidx4 [x] {sym} ptr (ADDQconst [c] idx) mem) + // cond: + // result: (MOVLstoreconstidx4 [ValAndOff(x).add(4*c)] {sym} ptr idx mem) + for { + x := v.AuxInt + sym := v.Aux + ptr := v.Args[0] + if v.Args[1].Op != OpAMD64ADDQconst { + break + } + c := v.Args[1].AuxInt + idx := v.Args[1].Args[0] + mem := v.Args[2] + v.reset(OpAMD64MOVLstoreconstidx4) + v.AuxInt = ValAndOff(x).add(4 * c) + v.Aux = sym + v.AddArg(ptr) + v.AddArg(idx) + v.AddArg(mem) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64MOVLstoreidx4(v *Value, config *Config) bool { + b := v.Block + _ = b + // match: (MOVLstoreidx4 [c] {sym} (ADDQconst [d] ptr) idx val mem) + // cond: + // result: (MOVLstoreidx4 [c+d] {sym} ptr idx val mem) + for { + c := v.AuxInt + sym := v.Aux + if v.Args[0].Op != OpAMD64ADDQconst { + break + } + d := v.Args[0].AuxInt + ptr := v.Args[0].Args[0] + idx := v.Args[1] + val := v.Args[2] + mem := v.Args[3] + 
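The idx4 rules around this point fold an ADDQconst from either operand into the displacement, and the asymmetry is deliberate: the op addresses ptr + 4*idx + off, so a constant folded off the pointer adds d, while one folded off the index adds 4*d. A minimal runnable sketch of that arithmetic (effAddr4 is an illustrative helper, not part of this file):

package main

import "fmt"

// effAddr4 models the address a 4-byte-scaled indexed op computes:
// ptr + 4*idx + off.
func effAddr4(ptr, idx, off int64) int64 { return ptr + 4*idx + off }

func main() {
	ptr, idx, off, d := int64(0x1000), int64(3), int64(8), int64(2)
	// Folding (ADDQconst [d] idx) into the displacement must scale by 4:
	fmt.Println(effAddr4(ptr, idx+d, off) == effAddr4(ptr, idx, off+4*d)) // true
}

The same scaling shows up throughout the file as c+2*d, c+4*d, and c+8*d for the 2-, 4-, and 8-byte indexed ops.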
v.reset(OpAMD64MOVLstoreidx4) + v.AuxInt = c + d + v.Aux = sym + v.AddArg(ptr) + v.AddArg(idx) + v.AddArg(val) + v.AddArg(mem) + return true + } + // match: (MOVLstoreidx4 [c] {sym} ptr (ADDQconst [d] idx) val mem) + // cond: + // result: (MOVLstoreidx4 [c+4*d] {sym} ptr idx val mem) + for { + c := v.AuxInt + sym := v.Aux + ptr := v.Args[0] + if v.Args[1].Op != OpAMD64ADDQconst { + break + } + d := v.Args[1].AuxInt + idx := v.Args[1].Args[0] + val := v.Args[2] + mem := v.Args[3] + v.reset(OpAMD64MOVLstoreidx4) + v.AuxInt = c + 4*d + v.Aux = sym + v.AddArg(ptr) + v.AddArg(idx) + v.AddArg(val) + v.AddArg(mem) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64MOVOload(v *Value, config *Config) bool { + b := v.Block + _ = b + // match: (MOVOload [off1] {sym} (ADDQconst [off2] ptr) mem) + // cond: + // result: (MOVOload [addOff(off1, off2)] {sym} ptr mem) + for { + off1 := v.AuxInt + sym := v.Aux + if v.Args[0].Op != OpAMD64ADDQconst { + break + } + off2 := v.Args[0].AuxInt + ptr := v.Args[0].Args[0] + mem := v.Args[1] + v.reset(OpAMD64MOVOload) + v.AuxInt = addOff(off1, off2) + v.Aux = sym + v.AddArg(ptr) + v.AddArg(mem) + return true + } + // match: (MOVOload [off1] {sym1} (LEAQ [off2] {sym2} base) mem) + // cond: canMergeSym(sym1, sym2) + // result: (MOVOload [addOff(off1,off2)] {mergeSym(sym1,sym2)} base mem) + for { + off1 := v.AuxInt + sym1 := v.Aux + if v.Args[0].Op != OpAMD64LEAQ { + break + } + off2 := v.Args[0].AuxInt + sym2 := v.Args[0].Aux + base := v.Args[0].Args[0] + mem := v.Args[1] + if !(canMergeSym(sym1, sym2)) { + break + } + v.reset(OpAMD64MOVOload) + v.AuxInt = addOff(off1, off2) + v.Aux = mergeSym(sym1, sym2) + v.AddArg(base) + v.AddArg(mem) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64MOVOstore(v *Value, config *Config) bool { + b := v.Block + _ = b + // match: (MOVOstore [off1] {sym} (ADDQconst [off2] ptr) val mem) + // cond: + // result: (MOVOstore [addOff(off1, off2)] {sym} ptr val mem) + for { + off1 := v.AuxInt + sym := v.Aux + if v.Args[0].Op != OpAMD64ADDQconst { + break + } + off2 := v.Args[0].AuxInt + ptr := v.Args[0].Args[0] + val := v.Args[1] + mem := v.Args[2] + v.reset(OpAMD64MOVOstore) + v.AuxInt = addOff(off1, off2) + v.Aux = sym + v.AddArg(ptr) + v.AddArg(val) + v.AddArg(mem) + return true + } + // match: (MOVOstore [off1] {sym1} (LEAQ [off2] {sym2} base) val mem) + // cond: canMergeSym(sym1, sym2) + // result: (MOVOstore [addOff(off1,off2)] {mergeSym(sym1,sym2)} base val mem) + for { + off1 := v.AuxInt + sym1 := v.Aux + if v.Args[0].Op != OpAMD64LEAQ { + break + } + off2 := v.Args[0].AuxInt + sym2 := v.Args[0].Aux + base := v.Args[0].Args[0] + val := v.Args[1] + mem := v.Args[2] + if !(canMergeSym(sym1, sym2)) { + break + } + v.reset(OpAMD64MOVOstore) + v.AuxInt = addOff(off1, off2) + v.Aux = mergeSym(sym1, sym2) + v.AddArg(base) + v.AddArg(val) + v.AddArg(mem) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64MOVQload(v *Value, config *Config) bool { + b := v.Block + _ = b + // match: (MOVQload [off] {sym} ptr (MOVQstore [off2] {sym2} ptr2 x _)) + // cond: sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) + // result: x + for { + off := v.AuxInt + sym := v.Aux + ptr := v.Args[0] + if v.Args[1].Op != OpAMD64MOVQstore { + break + } + off2 := v.Args[1].AuxInt + sym2 := v.Args[1].Aux + ptr2 := v.Args[1].Args[0] + x := v.Args[1].Args[1] + if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) { + break + } + v.reset(OpCopy) + v.Type = x.Type + v.AddArg(x) + return true + } + // match: (MOVQload [off1] 
{sym} (ADDQconst [off2] ptr) mem) + // cond: + // result: (MOVQload [addOff(off1, off2)] {sym} ptr mem) + for { + off1 := v.AuxInt + sym := v.Aux + if v.Args[0].Op != OpAMD64ADDQconst { + break + } + off2 := v.Args[0].AuxInt + ptr := v.Args[0].Args[0] + mem := v.Args[1] + v.reset(OpAMD64MOVQload) + v.AuxInt = addOff(off1, off2) + v.Aux = sym + v.AddArg(ptr) + v.AddArg(mem) + return true + } + // match: (MOVQload [off1] {sym1} (LEAQ [off2] {sym2} base) mem) + // cond: canMergeSym(sym1, sym2) + // result: (MOVQload [addOff(off1,off2)] {mergeSym(sym1,sym2)} base mem) + for { + off1 := v.AuxInt + sym1 := v.Aux + if v.Args[0].Op != OpAMD64LEAQ { + break + } + off2 := v.Args[0].AuxInt + sym2 := v.Args[0].Aux + base := v.Args[0].Args[0] + mem := v.Args[1] + if !(canMergeSym(sym1, sym2)) { + break + } + v.reset(OpAMD64MOVQload) + v.AuxInt = addOff(off1, off2) + v.Aux = mergeSym(sym1, sym2) + v.AddArg(base) + v.AddArg(mem) + return true + } + // match: (MOVQload [off1] {sym1} (LEAQ8 [off2] {sym2} ptr idx) mem) + // cond: canMergeSym(sym1, sym2) + // result: (MOVQloadidx8 [addOff(off1, off2)] {mergeSym(sym1,sym2)} ptr idx mem) + for { + off1 := v.AuxInt + sym1 := v.Aux + if v.Args[0].Op != OpAMD64LEAQ8 { + break + } + off2 := v.Args[0].AuxInt + sym2 := v.Args[0].Aux + ptr := v.Args[0].Args[0] + idx := v.Args[0].Args[1] + mem := v.Args[1] + if !(canMergeSym(sym1, sym2)) { + break + } + v.reset(OpAMD64MOVQloadidx8) + v.AuxInt = addOff(off1, off2) + v.Aux = mergeSym(sym1, sym2) + v.AddArg(ptr) + v.AddArg(idx) + v.AddArg(mem) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64MOVQloadidx8(v *Value, config *Config) bool { + b := v.Block + _ = b + // match: (MOVQloadidx8 [c] {sym} (ADDQconst [d] ptr) idx mem) + // cond: + // result: (MOVQloadidx8 [c+d] {sym} ptr idx mem) + for { + c := v.AuxInt + sym := v.Aux + if v.Args[0].Op != OpAMD64ADDQconst { + break + } + d := v.Args[0].AuxInt + ptr := v.Args[0].Args[0] + idx := v.Args[1] + mem := v.Args[2] + v.reset(OpAMD64MOVQloadidx8) + v.AuxInt = c + d + v.Aux = sym + v.AddArg(ptr) + v.AddArg(idx) + v.AddArg(mem) + return true + } + // match: (MOVQloadidx8 [c] {sym} ptr (ADDQconst [d] idx) mem) + // cond: + // result: (MOVQloadidx8 [c+8*d] {sym} ptr idx mem) + for { + c := v.AuxInt + sym := v.Aux + ptr := v.Args[0] + if v.Args[1].Op != OpAMD64ADDQconst { + break + } + d := v.Args[1].AuxInt + idx := v.Args[1].Args[0] + mem := v.Args[2] + v.reset(OpAMD64MOVQloadidx8) + v.AuxInt = c + 8*d + v.Aux = sym + v.AddArg(ptr) + v.AddArg(idx) + v.AddArg(mem) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64MOVQstore(v *Value, config *Config) bool { + b := v.Block + _ = b + // match: (MOVQstore [off1] {sym} (ADDQconst [off2] ptr) val mem) + // cond: + // result: (MOVQstore [addOff(off1, off2)] {sym} ptr val mem) + for { + off1 := v.AuxInt + sym := v.Aux + if v.Args[0].Op != OpAMD64ADDQconst { + break + } + off2 := v.Args[0].AuxInt + ptr := v.Args[0].Args[0] + val := v.Args[1] + mem := v.Args[2] + v.reset(OpAMD64MOVQstore) + v.AuxInt = addOff(off1, off2) + v.Aux = sym + v.AddArg(ptr) + v.AddArg(val) + v.AddArg(mem) + return true + } + // match: (MOVQstore [off] {sym} ptr (MOVQconst [c]) mem) + // cond: validValAndOff(c,off) + // result: (MOVQstoreconst [makeValAndOff(c,off)] {sym} ptr mem) + for { + off := v.AuxInt + sym := v.Aux + ptr := v.Args[0] + if v.Args[1].Op != OpAMD64MOVQconst { + break + } + c := v.Args[1].AuxInt + mem := v.Args[2] + if !(validValAndOff(c, off)) { + break + } + v.reset(OpAMD64MOVQstoreconst) + v.AuxInt = 
makeValAndOff(c, off) + v.Aux = sym + v.AddArg(ptr) + v.AddArg(mem) + return true + } + // match: (MOVQstore [off1] {sym1} (LEAQ [off2] {sym2} base) val mem) + // cond: canMergeSym(sym1, sym2) + // result: (MOVQstore [addOff(off1,off2)] {mergeSym(sym1,sym2)} base val mem) + for { + off1 := v.AuxInt + sym1 := v.Aux + if v.Args[0].Op != OpAMD64LEAQ { + break + } + off2 := v.Args[0].AuxInt + sym2 := v.Args[0].Aux + base := v.Args[0].Args[0] + val := v.Args[1] + mem := v.Args[2] + if !(canMergeSym(sym1, sym2)) { + break + } + v.reset(OpAMD64MOVQstore) + v.AuxInt = addOff(off1, off2) + v.Aux = mergeSym(sym1, sym2) + v.AddArg(base) + v.AddArg(val) + v.AddArg(mem) + return true + } + // match: (MOVQstore [off1] {sym1} (LEAQ8 [off2] {sym2} ptr idx) val mem) + // cond: canMergeSym(sym1, sym2) + // result: (MOVQstoreidx8 [addOff(off1, off2)] {mergeSym(sym1,sym2)} ptr idx val mem) + for { + off1 := v.AuxInt + sym1 := v.Aux + if v.Args[0].Op != OpAMD64LEAQ8 { + break + } + off2 := v.Args[0].AuxInt + sym2 := v.Args[0].Aux + ptr := v.Args[0].Args[0] + idx := v.Args[0].Args[1] + val := v.Args[1] + mem := v.Args[2] + if !(canMergeSym(sym1, sym2)) { + break + } + v.reset(OpAMD64MOVQstoreidx8) + v.AuxInt = addOff(off1, off2) + v.Aux = mergeSym(sym1, sym2) + v.AddArg(ptr) + v.AddArg(idx) + v.AddArg(val) + v.AddArg(mem) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64MOVQstoreconst(v *Value, config *Config) bool { + b := v.Block + _ = b + // match: (MOVQstoreconst [sc] {s} (ADDQconst [off] ptr) mem) + // cond: ValAndOff(sc).canAdd(off) + // result: (MOVQstoreconst [ValAndOff(sc).add(off)] {s} ptr mem) + for { + sc := v.AuxInt + s := v.Aux + if v.Args[0].Op != OpAMD64ADDQconst { + break + } + off := v.Args[0].AuxInt + ptr := v.Args[0].Args[0] + mem := v.Args[1] + if !(ValAndOff(sc).canAdd(off)) { + break + } + v.reset(OpAMD64MOVQstoreconst) + v.AuxInt = ValAndOff(sc).add(off) + v.Aux = s + v.AddArg(ptr) + v.AddArg(mem) + return true + } + // match: (MOVQstoreconst [sc] {sym1} (LEAQ [off] {sym2} ptr) mem) + // cond: canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off) + // result: (MOVQstoreconst [ValAndOff(sc).add(off)] {mergeSym(sym1, sym2)} ptr mem) + for { + sc := v.AuxInt + sym1 := v.Aux + if v.Args[0].Op != OpAMD64LEAQ { + break + } + off := v.Args[0].AuxInt + sym2 := v.Args[0].Aux + ptr := v.Args[0].Args[0] + mem := v.Args[1] + if !(canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off)) { + break + } + v.reset(OpAMD64MOVQstoreconst) + v.AuxInt = ValAndOff(sc).add(off) + v.Aux = mergeSym(sym1, sym2) + v.AddArg(ptr) + v.AddArg(mem) + return true + } + // match: (MOVQstoreconst [x] {sym1} (LEAQ8 [off] {sym2} ptr idx) mem) + // cond: canMergeSym(sym1, sym2) + // result: (MOVQstoreconstidx8 [ValAndOff(x).add(off)] {mergeSym(sym1,sym2)} ptr idx mem) + for { + x := v.AuxInt + sym1 := v.Aux + if v.Args[0].Op != OpAMD64LEAQ8 { + break + } + off := v.Args[0].AuxInt + sym2 := v.Args[0].Aux + ptr := v.Args[0].Args[0] + idx := v.Args[0].Args[1] + mem := v.Args[1] + if !(canMergeSym(sym1, sym2)) { + break + } + v.reset(OpAMD64MOVQstoreconstidx8) + v.AuxInt = ValAndOff(x).add(off) + v.Aux = mergeSym(sym1, sym2) + v.AddArg(ptr) + v.AddArg(idx) + v.AddArg(mem) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64MOVQstoreconstidx8(v *Value, config *Config) bool { + b := v.Block + _ = b + // match: (MOVQstoreconstidx8 [x] {sym} (ADDQconst [c] ptr) idx mem) + // cond: + // result: (MOVQstoreconstidx8 [ValAndOff(x).add(c)] {sym} ptr idx mem) + for { + x := v.AuxInt + sym := v.Aux + if 
v.Args[0].Op != OpAMD64ADDQconst { + break + } + c := v.Args[0].AuxInt + ptr := v.Args[0].Args[0] + idx := v.Args[1] + mem := v.Args[2] + v.reset(OpAMD64MOVQstoreconstidx8) + v.AuxInt = ValAndOff(x).add(c) + v.Aux = sym + v.AddArg(ptr) + v.AddArg(idx) + v.AddArg(mem) + return true + } + // match: (MOVQstoreconstidx8 [x] {sym} ptr (ADDQconst [c] idx) mem) + // cond: + // result: (MOVQstoreconstidx8 [ValAndOff(x).add(8*c)] {sym} ptr idx mem) + for { + x := v.AuxInt + sym := v.Aux + ptr := v.Args[0] + if v.Args[1].Op != OpAMD64ADDQconst { + break + } + c := v.Args[1].AuxInt + idx := v.Args[1].Args[0] + mem := v.Args[2] + v.reset(OpAMD64MOVQstoreconstidx8) + v.AuxInt = ValAndOff(x).add(8 * c) + v.Aux = sym + v.AddArg(ptr) + v.AddArg(idx) + v.AddArg(mem) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64MOVQstoreidx8(v *Value, config *Config) bool { + b := v.Block + _ = b + // match: (MOVQstoreidx8 [c] {sym} (ADDQconst [d] ptr) idx val mem) + // cond: + // result: (MOVQstoreidx8 [c+d] {sym} ptr idx val mem) + for { + c := v.AuxInt + sym := v.Aux + if v.Args[0].Op != OpAMD64ADDQconst { + break + } + d := v.Args[0].AuxInt + ptr := v.Args[0].Args[0] + idx := v.Args[1] + val := v.Args[2] + mem := v.Args[3] + v.reset(OpAMD64MOVQstoreidx8) + v.AuxInt = c + d + v.Aux = sym + v.AddArg(ptr) + v.AddArg(idx) + v.AddArg(val) + v.AddArg(mem) + return true + } + // match: (MOVQstoreidx8 [c] {sym} ptr (ADDQconst [d] idx) val mem) + // cond: + // result: (MOVQstoreidx8 [c+8*d] {sym} ptr idx val mem) + for { + c := v.AuxInt + sym := v.Aux + ptr := v.Args[0] + if v.Args[1].Op != OpAMD64ADDQconst { + break + } + d := v.Args[1].AuxInt + idx := v.Args[1].Args[0] + val := v.Args[2] + mem := v.Args[3] + v.reset(OpAMD64MOVQstoreidx8) + v.AuxInt = c + 8*d + v.Aux = sym + v.AddArg(ptr) + v.AddArg(idx) + v.AddArg(val) + v.AddArg(mem) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64MOVSDload(v *Value, config *Config) bool { + b := v.Block + _ = b + // match: (MOVSDload [off1] {sym} (ADDQconst [off2] ptr) mem) + // cond: + // result: (MOVSDload [addOff(off1, off2)] {sym} ptr mem) + for { + off1 := v.AuxInt + sym := v.Aux + if v.Args[0].Op != OpAMD64ADDQconst { + break + } + off2 := v.Args[0].AuxInt + ptr := v.Args[0].Args[0] + mem := v.Args[1] + v.reset(OpAMD64MOVSDload) + v.AuxInt = addOff(off1, off2) + v.Aux = sym + v.AddArg(ptr) + v.AddArg(mem) + return true + } + // match: (MOVSDload [off1] {sym1} (LEAQ [off2] {sym2} base) mem) + // cond: canMergeSym(sym1, sym2) + // result: (MOVSDload [addOff(off1,off2)] {mergeSym(sym1,sym2)} base mem) + for { + off1 := v.AuxInt + sym1 := v.Aux + if v.Args[0].Op != OpAMD64LEAQ { + break + } + off2 := v.Args[0].AuxInt + sym2 := v.Args[0].Aux + base := v.Args[0].Args[0] + mem := v.Args[1] + if !(canMergeSym(sym1, sym2)) { + break + } + v.reset(OpAMD64MOVSDload) + v.AuxInt = addOff(off1, off2) + v.Aux = mergeSym(sym1, sym2) + v.AddArg(base) + v.AddArg(mem) + return true + } + // match: (MOVSDload [off1] {sym1} (LEAQ8 [off2] {sym2} ptr idx) mem) + // cond: canMergeSym(sym1, sym2) + // result: (MOVSDloadidx8 [addOff(off1, off2)] {mergeSym(sym1,sym2)} ptr idx mem) + for { + off1 := v.AuxInt + sym1 := v.Aux + if v.Args[0].Op != OpAMD64LEAQ8 { + break + } + off2 := v.Args[0].AuxInt + sym2 := v.Args[0].Aux + ptr := v.Args[0].Args[0] + idx := v.Args[0].Args[1] + mem := v.Args[1] + if !(canMergeSym(sym1, sym2)) { + break + } + v.reset(OpAMD64MOVSDloadidx8) + v.AuxInt = addOff(off1, off2) + v.Aux = mergeSym(sym1, sym2) + v.AddArg(ptr) + v.AddArg(idx) + 
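These LEAQ-folding rules all gate on canMergeSym and combine symbols with mergeSym, which the generated code only calls. A plausible reading of those helpers (an assumption sketched here, not quoted from this change) is that two symbolic bases can be merged only when at most one of them is set:

package main

import "fmt"

// canMergeSym/mergeSym sketched the way the rewrite rules use them:
// two symbolic offsets combine only if at most one is non-nil.
func canMergeSym(x, y interface{}) bool { return x == nil || y == nil }

func mergeSym(x, y interface{}) interface{} {
	if x == nil {
		return y
	}
	return x // caller guarantees y == nil via canMergeSym
}

func main() {
	var sym1 interface{}      // nil: the load itself has no symbol
	sym2 := "pkg.someGlobal" // made-up symbol name for illustration
	fmt.Println(canMergeSym(sym1, sym2), mergeSym(sym1, sym2))
}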
v.AddArg(mem) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64MOVSDloadidx8(v *Value, config *Config) bool { + b := v.Block + _ = b + // match: (MOVSDloadidx8 [c] {sym} (ADDQconst [d] ptr) idx mem) + // cond: + // result: (MOVSDloadidx8 [c+d] {sym} ptr idx mem) + for { + c := v.AuxInt + sym := v.Aux + if v.Args[0].Op != OpAMD64ADDQconst { + break + } + d := v.Args[0].AuxInt + ptr := v.Args[0].Args[0] + idx := v.Args[1] + mem := v.Args[2] + v.reset(OpAMD64MOVSDloadidx8) + v.AuxInt = c + d + v.Aux = sym + v.AddArg(ptr) + v.AddArg(idx) + v.AddArg(mem) + return true + } + // match: (MOVSDloadidx8 [c] {sym} ptr (ADDQconst [d] idx) mem) + // cond: + // result: (MOVSDloadidx8 [c+8*d] {sym} ptr idx mem) + for { + c := v.AuxInt + sym := v.Aux + ptr := v.Args[0] + if v.Args[1].Op != OpAMD64ADDQconst { + break + } + d := v.Args[1].AuxInt + idx := v.Args[1].Args[0] + mem := v.Args[2] + v.reset(OpAMD64MOVSDloadidx8) + v.AuxInt = c + 8*d + v.Aux = sym + v.AddArg(ptr) + v.AddArg(idx) + v.AddArg(mem) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64MOVSDstore(v *Value, config *Config) bool { + b := v.Block + _ = b + // match: (MOVSDstore [off1] {sym} (ADDQconst [off2] ptr) val mem) + // cond: + // result: (MOVSDstore [addOff(off1, off2)] {sym} ptr val mem) + for { + off1 := v.AuxInt + sym := v.Aux + if v.Args[0].Op != OpAMD64ADDQconst { + break + } + off2 := v.Args[0].AuxInt + ptr := v.Args[0].Args[0] + val := v.Args[1] + mem := v.Args[2] + v.reset(OpAMD64MOVSDstore) + v.AuxInt = addOff(off1, off2) + v.Aux = sym + v.AddArg(ptr) + v.AddArg(val) + v.AddArg(mem) + return true + } + // match: (MOVSDstore [off1] {sym1} (LEAQ [off2] {sym2} base) val mem) + // cond: canMergeSym(sym1, sym2) + // result: (MOVSDstore [addOff(off1,off2)] {mergeSym(sym1,sym2)} base val mem) + for { + off1 := v.AuxInt + sym1 := v.Aux + if v.Args[0].Op != OpAMD64LEAQ { + break + } + off2 := v.Args[0].AuxInt + sym2 := v.Args[0].Aux + base := v.Args[0].Args[0] + val := v.Args[1] + mem := v.Args[2] + if !(canMergeSym(sym1, sym2)) { + break + } + v.reset(OpAMD64MOVSDstore) + v.AuxInt = addOff(off1, off2) + v.Aux = mergeSym(sym1, sym2) + v.AddArg(base) + v.AddArg(val) + v.AddArg(mem) + return true + } + // match: (MOVSDstore [off1] {sym1} (LEAQ8 [off2] {sym2} ptr idx) val mem) + // cond: canMergeSym(sym1, sym2) + // result: (MOVSDstoreidx8 [addOff(off1, off2)] {mergeSym(sym1,sym2)} ptr idx val mem) + for { + off1 := v.AuxInt + sym1 := v.Aux + if v.Args[0].Op != OpAMD64LEAQ8 { + break + } + off2 := v.Args[0].AuxInt + sym2 := v.Args[0].Aux + ptr := v.Args[0].Args[0] + idx := v.Args[0].Args[1] + val := v.Args[1] + mem := v.Args[2] + if !(canMergeSym(sym1, sym2)) { + break + } + v.reset(OpAMD64MOVSDstoreidx8) + v.AuxInt = addOff(off1, off2) + v.Aux = mergeSym(sym1, sym2) + v.AddArg(ptr) + v.AddArg(idx) + v.AddArg(val) + v.AddArg(mem) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64MOVSDstoreidx8(v *Value, config *Config) bool { + b := v.Block + _ = b + // match: (MOVSDstoreidx8 [c] {sym} (ADDQconst [d] ptr) idx val mem) + // cond: + // result: (MOVSDstoreidx8 [c+d] {sym} ptr idx val mem) + for { + c := v.AuxInt + sym := v.Aux + if v.Args[0].Op != OpAMD64ADDQconst { + break + } + d := v.Args[0].AuxInt + ptr := v.Args[0].Args[0] + idx := v.Args[1] + val := v.Args[2] + mem := v.Args[3] + v.reset(OpAMD64MOVSDstoreidx8) + v.AuxInt = c + d + v.Aux = sym + v.AddArg(ptr) + v.AddArg(idx) + v.AddArg(val) + v.AddArg(mem) + return true + } + // match: (MOVSDstoreidx8 [c] {sym} ptr (ADDQconst [d] idx) 
val mem) + // cond: + // result: (MOVSDstoreidx8 [c+8*d] {sym} ptr idx val mem) + for { + c := v.AuxInt + sym := v.Aux + ptr := v.Args[0] + if v.Args[1].Op != OpAMD64ADDQconst { + break + } + d := v.Args[1].AuxInt + idx := v.Args[1].Args[0] + val := v.Args[2] + mem := v.Args[3] + v.reset(OpAMD64MOVSDstoreidx8) + v.AuxInt = c + 8*d + v.Aux = sym + v.AddArg(ptr) + v.AddArg(idx) + v.AddArg(val) + v.AddArg(mem) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64MOVSSload(v *Value, config *Config) bool { + b := v.Block + _ = b + // match: (MOVSSload [off1] {sym} (ADDQconst [off2] ptr) mem) + // cond: + // result: (MOVSSload [addOff(off1, off2)] {sym} ptr mem) + for { + off1 := v.AuxInt + sym := v.Aux + if v.Args[0].Op != OpAMD64ADDQconst { + break + } + off2 := v.Args[0].AuxInt + ptr := v.Args[0].Args[0] + mem := v.Args[1] + v.reset(OpAMD64MOVSSload) + v.AuxInt = addOff(off1, off2) + v.Aux = sym + v.AddArg(ptr) + v.AddArg(mem) + return true + } + // match: (MOVSSload [off1] {sym1} (LEAQ [off2] {sym2} base) mem) + // cond: canMergeSym(sym1, sym2) + // result: (MOVSSload [addOff(off1,off2)] {mergeSym(sym1,sym2)} base mem) + for { + off1 := v.AuxInt + sym1 := v.Aux + if v.Args[0].Op != OpAMD64LEAQ { + break + } + off2 := v.Args[0].AuxInt + sym2 := v.Args[0].Aux + base := v.Args[0].Args[0] + mem := v.Args[1] + if !(canMergeSym(sym1, sym2)) { + break + } + v.reset(OpAMD64MOVSSload) + v.AuxInt = addOff(off1, off2) + v.Aux = mergeSym(sym1, sym2) + v.AddArg(base) + v.AddArg(mem) + return true + } + // match: (MOVSSload [off1] {sym1} (LEAQ4 [off2] {sym2} ptr idx) mem) + // cond: canMergeSym(sym1, sym2) + // result: (MOVSSloadidx4 [addOff(off1, off2)] {mergeSym(sym1,sym2)} ptr idx mem) + for { + off1 := v.AuxInt + sym1 := v.Aux + if v.Args[0].Op != OpAMD64LEAQ4 { + break + } + off2 := v.Args[0].AuxInt + sym2 := v.Args[0].Aux + ptr := v.Args[0].Args[0] + idx := v.Args[0].Args[1] + mem := v.Args[1] + if !(canMergeSym(sym1, sym2)) { + break + } + v.reset(OpAMD64MOVSSloadidx4) + v.AuxInt = addOff(off1, off2) + v.Aux = mergeSym(sym1, sym2) + v.AddArg(ptr) + v.AddArg(idx) + v.AddArg(mem) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64MOVSSloadidx4(v *Value, config *Config) bool { + b := v.Block + _ = b + // match: (MOVSSloadidx4 [c] {sym} (ADDQconst [d] ptr) idx mem) + // cond: + // result: (MOVSSloadidx4 [c+d] {sym} ptr idx mem) + for { + c := v.AuxInt + sym := v.Aux + if v.Args[0].Op != OpAMD64ADDQconst { + break + } + d := v.Args[0].AuxInt + ptr := v.Args[0].Args[0] + idx := v.Args[1] + mem := v.Args[2] + v.reset(OpAMD64MOVSSloadidx4) + v.AuxInt = c + d + v.Aux = sym + v.AddArg(ptr) + v.AddArg(idx) + v.AddArg(mem) + return true + } + // match: (MOVSSloadidx4 [c] {sym} ptr (ADDQconst [d] idx) mem) + // cond: + // result: (MOVSSloadidx4 [c+4*d] {sym} ptr idx mem) + for { + c := v.AuxInt + sym := v.Aux + ptr := v.Args[0] + if v.Args[1].Op != OpAMD64ADDQconst { + break + } + d := v.Args[1].AuxInt + idx := v.Args[1].Args[0] + mem := v.Args[2] + v.reset(OpAMD64MOVSSloadidx4) + v.AuxInt = c + 4*d + v.Aux = sym + v.AddArg(ptr) + v.AddArg(idx) + v.AddArg(mem) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64MOVSSstore(v *Value, config *Config) bool { + b := v.Block + _ = b + // match: (MOVSSstore [off1] {sym} (ADDQconst [off2] ptr) val mem) + // cond: + // result: (MOVSSstore [addOff(off1, off2)] {sym} ptr val mem) + for { + off1 := v.AuxInt + sym := v.Aux + if v.Args[0].Op != OpAMD64ADDQconst { + break + } + off2 := v.Args[0].AuxInt + ptr := v.Args[0].Args[0] + 
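The most common shape in this file is the ADDQconst fold seen just above: a constant pointer adjustment migrates into the instruction's immediate offset via addOff. The rewrite is plain associativity of the address arithmetic; a runnable sketch, with slice indexing standing in for memory and all names illustrative:

package main

import "fmt"

func main() {
	// A MOVSSload [off] ptr reads mem[ptr+off]. Folding
	// (ADDQconst [off2] ptr) into the instruction is associativity:
	mem := make([]float32, 16)
	mem[5] = 1.5
	ptr, off1, off2 := 1, 1, 3
	// load [off1] (ptr+off2)  ==  load [off1+off2] ptr
	fmt.Println(mem[(ptr+off2)+off1] == mem[ptr+(off1+off2)]) // true
}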
val := v.Args[1] + mem := v.Args[2] + v.reset(OpAMD64MOVSSstore) + v.AuxInt = addOff(off1, off2) + v.Aux = sym + v.AddArg(ptr) + v.AddArg(val) + v.AddArg(mem) + return true + } + // match: (MOVSSstore [off1] {sym1} (LEAQ [off2] {sym2} base) val mem) + // cond: canMergeSym(sym1, sym2) + // result: (MOVSSstore [addOff(off1,off2)] {mergeSym(sym1,sym2)} base val mem) + for { + off1 := v.AuxInt + sym1 := v.Aux + if v.Args[0].Op != OpAMD64LEAQ { + break + } + off2 := v.Args[0].AuxInt + sym2 := v.Args[0].Aux + base := v.Args[0].Args[0] + val := v.Args[1] + mem := v.Args[2] + if !(canMergeSym(sym1, sym2)) { + break + } + v.reset(OpAMD64MOVSSstore) + v.AuxInt = addOff(off1, off2) + v.Aux = mergeSym(sym1, sym2) + v.AddArg(base) + v.AddArg(val) + v.AddArg(mem) + return true + } + // match: (MOVSSstore [off1] {sym1} (LEAQ4 [off2] {sym2} ptr idx) val mem) + // cond: canMergeSym(sym1, sym2) + // result: (MOVSSstoreidx4 [addOff(off1, off2)] {mergeSym(sym1,sym2)} ptr idx val mem) + for { + off1 := v.AuxInt + sym1 := v.Aux + if v.Args[0].Op != OpAMD64LEAQ4 { + break + } + off2 := v.Args[0].AuxInt + sym2 := v.Args[0].Aux + ptr := v.Args[0].Args[0] + idx := v.Args[0].Args[1] + val := v.Args[1] + mem := v.Args[2] + if !(canMergeSym(sym1, sym2)) { + break + } + v.reset(OpAMD64MOVSSstoreidx4) + v.AuxInt = addOff(off1, off2) + v.Aux = mergeSym(sym1, sym2) + v.AddArg(ptr) + v.AddArg(idx) + v.AddArg(val) + v.AddArg(mem) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64MOVSSstoreidx4(v *Value, config *Config) bool { + b := v.Block + _ = b + // match: (MOVSSstoreidx4 [c] {sym} (ADDQconst [d] ptr) idx val mem) + // cond: + // result: (MOVSSstoreidx4 [c+d] {sym} ptr idx val mem) + for { + c := v.AuxInt + sym := v.Aux + if v.Args[0].Op != OpAMD64ADDQconst { + break + } + d := v.Args[0].AuxInt + ptr := v.Args[0].Args[0] + idx := v.Args[1] + val := v.Args[2] + mem := v.Args[3] + v.reset(OpAMD64MOVSSstoreidx4) + v.AuxInt = c + d + v.Aux = sym + v.AddArg(ptr) + v.AddArg(idx) + v.AddArg(val) + v.AddArg(mem) + return true + } + // match: (MOVSSstoreidx4 [c] {sym} ptr (ADDQconst [d] idx) val mem) + // cond: + // result: (MOVSSstoreidx4 [c+4*d] {sym} ptr idx val mem) + for { + c := v.AuxInt + sym := v.Aux + ptr := v.Args[0] + if v.Args[1].Op != OpAMD64ADDQconst { + break + } + d := v.Args[1].AuxInt + idx := v.Args[1].Args[0] + val := v.Args[2] + mem := v.Args[3] + v.reset(OpAMD64MOVSSstoreidx4) + v.AuxInt = c + 4*d + v.Aux = sym + v.AddArg(ptr) + v.AddArg(idx) + v.AddArg(val) + v.AddArg(mem) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64MOVWQSX(v *Value, config *Config) bool { + b := v.Block + _ = b + // match: (MOVWQSX (MOVWload [off] {sym} ptr mem)) + // cond: + // result: @v.Args[0].Block (MOVWQSXload <v.Type> [off] {sym} ptr mem) + for { + if v.Args[0].Op != OpAMD64MOVWload { + break + } + off := v.Args[0].AuxInt + sym := v.Args[0].Aux + ptr := v.Args[0].Args[0] + mem := v.Args[0].Args[1] + b = v.Args[0].Block + v0 := b.NewValue0(v.Line, OpAMD64MOVWQSXload, v.Type) + v.reset(OpCopy) + v.AddArg(v0) + v0.AuxInt = off + v0.Aux = sym + v0.AddArg(ptr) + v0.AddArg(mem) + return true + } + // match: (MOVWQSX (ANDWconst [c] x)) + // cond: c & 0x8000 == 0 + // result: (ANDQconst [c & 0x7fff] x) + for { + if v.Args[0].Op != OpAMD64ANDWconst { + break + } + c := v.Args[0].AuxInt + x := v.Args[0].Args[0] + if !(c&0x8000 == 0) { + break + } + v.reset(OpAMD64ANDQconst) + v.AuxInt = c & 0x7fff + v.AddArg(x) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64MOVWQZX(v *Value, config 
*Config) bool { + b := v.Block + _ = b + // match: (MOVWQZX (MOVWload [off] {sym} ptr mem)) + // cond: + // result: @v.Args[0].Block (MOVWQZXload <v.Type> [off] {sym} ptr mem) + for { + if v.Args[0].Op != OpAMD64MOVWload { + break + } + off := v.Args[0].AuxInt + sym := v.Args[0].Aux + ptr := v.Args[0].Args[0] + mem := v.Args[0].Args[1] + b = v.Args[0].Block + v0 := b.NewValue0(v.Line, OpAMD64MOVWQZXload, v.Type) + v.reset(OpCopy) + v.AddArg(v0) + v0.AuxInt = off + v0.Aux = sym + v0.AddArg(ptr) + v0.AddArg(mem) + return true + } + // match: (MOVWQZX (ANDWconst [c] x)) + // cond: + // result: (ANDQconst [c & 0xffff] x) + for { + if v.Args[0].Op != OpAMD64ANDWconst { + break + } + c := v.Args[0].AuxInt + x := v.Args[0].Args[0] + v.reset(OpAMD64ANDQconst) + v.AuxInt = c & 0xffff + v.AddArg(x) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64MOVWload(v *Value, config *Config) bool { + b := v.Block + _ = b + // match: (MOVWload [off] {sym} ptr (MOVWstore [off2] {sym2} ptr2 x _)) + // cond: sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) + // result: x + for { + off := v.AuxInt + sym := v.Aux + ptr := v.Args[0] + if v.Args[1].Op != OpAMD64MOVWstore { + break + } + off2 := v.Args[1].AuxInt + sym2 := v.Args[1].Aux + ptr2 := v.Args[1].Args[0] + x := v.Args[1].Args[1] + if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) { + break + } + v.reset(OpCopy) + v.Type = x.Type + v.AddArg(x) + return true + } + // match: (MOVWload [off1] {sym} (ADDQconst [off2] ptr) mem) + // cond: + // result: (MOVWload [addOff(off1, off2)] {sym} ptr mem) + for { + off1 := v.AuxInt + sym := v.Aux + if v.Args[0].Op != OpAMD64ADDQconst { + break + } + off2 := v.Args[0].AuxInt + ptr := v.Args[0].Args[0] + mem := v.Args[1] + v.reset(OpAMD64MOVWload) + v.AuxInt = addOff(off1, off2) + v.Aux = sym + v.AddArg(ptr) + v.AddArg(mem) + return true + } + // match: (MOVWload [off1] {sym1} (LEAQ [off2] {sym2} base) mem) + // cond: canMergeSym(sym1, sym2) + // result: (MOVWload [addOff(off1,off2)] {mergeSym(sym1,sym2)} base mem) + for { + off1 := v.AuxInt + sym1 := v.Aux + if v.Args[0].Op != OpAMD64LEAQ { + break + } + off2 := v.Args[0].AuxInt + sym2 := v.Args[0].Aux + base := v.Args[0].Args[0] + mem := v.Args[1] + if !(canMergeSym(sym1, sym2)) { + break + } + v.reset(OpAMD64MOVWload) + v.AuxInt = addOff(off1, off2) + v.Aux = mergeSym(sym1, sym2) + v.AddArg(base) + v.AddArg(mem) + return true + } + // match: (MOVWload [off1] {sym1} (LEAQ2 [off2] {sym2} ptr idx) mem) + // cond: canMergeSym(sym1, sym2) + // result: (MOVWloadidx2 [addOff(off1, off2)] {mergeSym(sym1,sym2)} ptr idx mem) + for { + off1 := v.AuxInt + sym1 := v.Aux + if v.Args[0].Op != OpAMD64LEAQ2 { + break + } + off2 := v.Args[0].AuxInt + sym2 := v.Args[0].Aux + ptr := v.Args[0].Args[0] + idx := v.Args[0].Args[1] + mem := v.Args[1] + if !(canMergeSym(sym1, sym2)) { + break + } + v.reset(OpAMD64MOVWloadidx2) + v.AuxInt = addOff(off1, off2) + v.Aux = mergeSym(sym1, sym2) + v.AddArg(ptr) + v.AddArg(idx) + v.AddArg(mem) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64MOVWloadidx2(v *Value, config *Config) bool { + b := v.Block + _ = b + // match: (MOVWloadidx2 [c] {sym} (ADDQconst [d] ptr) idx mem) + // cond: + // result: (MOVWloadidx2 [c+d] {sym} ptr idx mem) + for { + c := v.AuxInt + sym := v.Aux + if v.Args[0].Op != OpAMD64ADDQconst { + break + } + d := v.Args[0].AuxInt + ptr := v.Args[0].Args[0] + idx := v.Args[1] + mem := v.Args[2] + v.reset(OpAMD64MOVWloadidx2) + v.AuxInt = c + d + v.Aux = sym + v.AddArg(ptr) + v.AddArg(idx) + 
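The MOVWQZX/ANDWconst rule earlier in this group works because a 16-bit mask already clears bits 16..63 of the result, making the explicit zero-extension redundant; the pair collapses to a single 64-bit AND. Checking that identity directly (a sketch, not compiler code):

package main

import "fmt"

func main() {
	x := uint64(0xdeadbeefcafe)
	c := uint16(0x0fff)
	// MOVWQZX (ANDWconst [c] x): mask in 16 bits, then zero-extend...
	a := uint64(uint16(x) & c)
	// ...equals one 64-bit AND with c, since c has no bits above 15.
	b := x & uint64(c)
	fmt.Println(a == b) // true
}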
v.AddArg(mem) + return true + } + // match: (MOVWloadidx2 [c] {sym} ptr (ADDQconst [d] idx) mem) + // cond: + // result: (MOVWloadidx2 [c+2*d] {sym} ptr idx mem) + for { + c := v.AuxInt + sym := v.Aux + ptr := v.Args[0] + if v.Args[1].Op != OpAMD64ADDQconst { + break + } + d := v.Args[1].AuxInt + idx := v.Args[1].Args[0] + mem := v.Args[2] + v.reset(OpAMD64MOVWloadidx2) + v.AuxInt = c + 2*d + v.Aux = sym + v.AddArg(ptr) + v.AddArg(idx) + v.AddArg(mem) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64MOVWstore(v *Value, config *Config) bool { + b := v.Block + _ = b + // match: (MOVWstore [off] {sym} ptr (MOVWQSX x) mem) + // cond: + // result: (MOVWstore [off] {sym} ptr x mem) + for { + off := v.AuxInt + sym := v.Aux + ptr := v.Args[0] + if v.Args[1].Op != OpAMD64MOVWQSX { + break + } + x := v.Args[1].Args[0] + mem := v.Args[2] + v.reset(OpAMD64MOVWstore) + v.AuxInt = off + v.Aux = sym + v.AddArg(ptr) + v.AddArg(x) + v.AddArg(mem) + return true + } + // match: (MOVWstore [off] {sym} ptr (MOVWQZX x) mem) + // cond: + // result: (MOVWstore [off] {sym} ptr x mem) + for { + off := v.AuxInt + sym := v.Aux + ptr := v.Args[0] + if v.Args[1].Op != OpAMD64MOVWQZX { + break + } + x := v.Args[1].Args[0] + mem := v.Args[2] + v.reset(OpAMD64MOVWstore) + v.AuxInt = off + v.Aux = sym + v.AddArg(ptr) + v.AddArg(x) + v.AddArg(mem) + return true + } + // match: (MOVWstore [off1] {sym} (ADDQconst [off2] ptr) val mem) + // cond: + // result: (MOVWstore [addOff(off1, off2)] {sym} ptr val mem) + for { + off1 := v.AuxInt + sym := v.Aux + if v.Args[0].Op != OpAMD64ADDQconst { + break + } + off2 := v.Args[0].AuxInt + ptr := v.Args[0].Args[0] + val := v.Args[1] + mem := v.Args[2] + v.reset(OpAMD64MOVWstore) + v.AuxInt = addOff(off1, off2) + v.Aux = sym + v.AddArg(ptr) + v.AddArg(val) + v.AddArg(mem) + return true + } + // match: (MOVWstore [off] {sym} ptr (MOVWconst [c]) mem) + // cond: validOff(off) + // result: (MOVWstoreconst [makeValAndOff(int64(int16(c)),off)] {sym} ptr mem) + for { + off := v.AuxInt + sym := v.Aux + ptr := v.Args[0] + if v.Args[1].Op != OpAMD64MOVWconst { + break + } + c := v.Args[1].AuxInt + mem := v.Args[2] + if !(validOff(off)) { + break + } + v.reset(OpAMD64MOVWstoreconst) + v.AuxInt = makeValAndOff(int64(int16(c)), off) + v.Aux = sym + v.AddArg(ptr) + v.AddArg(mem) + return true + } + // match: (MOVWstore [off1] {sym1} (LEAQ [off2] {sym2} base) val mem) + // cond: canMergeSym(sym1, sym2) + // result: (MOVWstore [addOff(off1,off2)] {mergeSym(sym1,sym2)} base val mem) + for { + off1 := v.AuxInt + sym1 := v.Aux + if v.Args[0].Op != OpAMD64LEAQ { + break + } + off2 := v.Args[0].AuxInt + sym2 := v.Args[0].Aux + base := v.Args[0].Args[0] + val := v.Args[1] + mem := v.Args[2] + if !(canMergeSym(sym1, sym2)) { + break + } + v.reset(OpAMD64MOVWstore) + v.AuxInt = addOff(off1, off2) + v.Aux = mergeSym(sym1, sym2) + v.AddArg(base) + v.AddArg(val) + v.AddArg(mem) + return true + } + // match: (MOVWstore [off1] {sym1} (LEAQ2 [off2] {sym2} ptr idx) val mem) + // cond: canMergeSym(sym1, sym2) + // result: (MOVWstoreidx2 [addOff(off1, off2)] {mergeSym(sym1,sym2)} ptr idx val mem) + for { + off1 := v.AuxInt + sym1 := v.Aux + if v.Args[0].Op != OpAMD64LEAQ2 { + break + } + off2 := v.Args[0].AuxInt + sym2 := v.Args[0].Aux + ptr := v.Args[0].Args[0] + idx := v.Args[0].Args[1] + val := v.Args[1] + mem := v.Args[2] + if !(canMergeSym(sym1, sym2)) { + break + } + v.reset(OpAMD64MOVWstoreidx2) + v.AuxInt = addOff(off1, off2) + v.Aux = mergeSym(sym1, sym2) + v.AddArg(ptr) + v.AddArg(idx) + 
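The first two MOVWstore rules above drop a MOVWQSX or MOVWQZX feeding a store: a 16-bit store writes only the low half-word, so how the upper bits were extended is unobservable in memory. The byte-level identity, sketched:

package main

import (
	"encoding/binary"
	"fmt"
)

func main() {
	buf := make([]byte, 2)
	x := int32(-2) // a wider value with set high bits
	// Storing the sign-extended value writes the same two bytes
	// as storing the plain truncation of x.
	binary.LittleEndian.PutUint16(buf, uint16(int16(x)))
	a := fmt.Sprint(buf)
	binary.LittleEndian.PutUint16(buf, uint16(x))
	fmt.Println(a == fmt.Sprint(buf)) // true
}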
v.AddArg(val) + v.AddArg(mem) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64MOVWstoreconst(v *Value, config *Config) bool { + b := v.Block + _ = b + // match: (MOVWstoreconst [sc] {s} (ADDQconst [off] ptr) mem) + // cond: ValAndOff(sc).canAdd(off) + // result: (MOVWstoreconst [ValAndOff(sc).add(off)] {s} ptr mem) + for { + sc := v.AuxInt + s := v.Aux + if v.Args[0].Op != OpAMD64ADDQconst { + break + } + off := v.Args[0].AuxInt + ptr := v.Args[0].Args[0] + mem := v.Args[1] + if !(ValAndOff(sc).canAdd(off)) { + break + } + v.reset(OpAMD64MOVWstoreconst) + v.AuxInt = ValAndOff(sc).add(off) + v.Aux = s + v.AddArg(ptr) + v.AddArg(mem) + return true + } + // match: (MOVWstoreconst [sc] {sym1} (LEAQ [off] {sym2} ptr) mem) + // cond: canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off) + // result: (MOVWstoreconst [ValAndOff(sc).add(off)] {mergeSym(sym1, sym2)} ptr mem) + for { + sc := v.AuxInt + sym1 := v.Aux + if v.Args[0].Op != OpAMD64LEAQ { + break + } + off := v.Args[0].AuxInt + sym2 := v.Args[0].Aux + ptr := v.Args[0].Args[0] + mem := v.Args[1] + if !(canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off)) { + break + } + v.reset(OpAMD64MOVWstoreconst) + v.AuxInt = ValAndOff(sc).add(off) + v.Aux = mergeSym(sym1, sym2) + v.AddArg(ptr) + v.AddArg(mem) + return true + } + // match: (MOVWstoreconst [x] {sym1} (LEAQ2 [off] {sym2} ptr idx) mem) + // cond: canMergeSym(sym1, sym2) + // result: (MOVWstoreconstidx2 [ValAndOff(x).add(off)] {mergeSym(sym1,sym2)} ptr idx mem) + for { + x := v.AuxInt + sym1 := v.Aux + if v.Args[0].Op != OpAMD64LEAQ2 { + break + } + off := v.Args[0].AuxInt + sym2 := v.Args[0].Aux + ptr := v.Args[0].Args[0] + idx := v.Args[0].Args[1] + mem := v.Args[1] + if !(canMergeSym(sym1, sym2)) { + break + } + v.reset(OpAMD64MOVWstoreconstidx2) + v.AuxInt = ValAndOff(x).add(off) + v.Aux = mergeSym(sym1, sym2) + v.AddArg(ptr) + v.AddArg(idx) + v.AddArg(mem) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64MOVWstoreconstidx2(v *Value, config *Config) bool { + b := v.Block + _ = b + // match: (MOVWstoreconstidx2 [x] {sym} (ADDQconst [c] ptr) idx mem) + // cond: + // result: (MOVWstoreconstidx2 [ValAndOff(x).add(c)] {sym} ptr idx mem) + for { + x := v.AuxInt + sym := v.Aux + if v.Args[0].Op != OpAMD64ADDQconst { + break + } + c := v.Args[0].AuxInt + ptr := v.Args[0].Args[0] + idx := v.Args[1] + mem := v.Args[2] + v.reset(OpAMD64MOVWstoreconstidx2) + v.AuxInt = ValAndOff(x).add(c) + v.Aux = sym + v.AddArg(ptr) + v.AddArg(idx) + v.AddArg(mem) + return true + } + // match: (MOVWstoreconstidx2 [x] {sym} ptr (ADDQconst [c] idx) mem) + // cond: + // result: (MOVWstoreconstidx2 [ValAndOff(x).add(2*c)] {sym} ptr idx mem) + for { + x := v.AuxInt + sym := v.Aux + ptr := v.Args[0] + if v.Args[1].Op != OpAMD64ADDQconst { + break + } + c := v.Args[1].AuxInt + idx := v.Args[1].Args[0] + mem := v.Args[2] + v.reset(OpAMD64MOVWstoreconstidx2) + v.AuxInt = ValAndOff(x).add(2 * c) + v.Aux = sym + v.AddArg(ptr) + v.AddArg(idx) + v.AddArg(mem) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64MOVWstoreidx2(v *Value, config *Config) bool { + b := v.Block + _ = b + // match: (MOVWstoreidx2 [c] {sym} (ADDQconst [d] ptr) idx val mem) + // cond: + // result: (MOVWstoreidx2 [c+d] {sym} ptr idx val mem) + for { + c := v.AuxInt + sym := v.Aux + if v.Args[0].Op != OpAMD64ADDQconst { + break + } + d := v.Args[0].AuxInt + ptr := v.Args[0].Args[0] + idx := v.Args[1] + val := v.Args[2] + mem := v.Args[3] + v.reset(OpAMD64MOVWstoreidx2) + v.AuxInt = c + d + v.Aux = sym 
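The storeconst rules above lean on ValAndOff, which packs both the constant to store and the address offset into the single AuxInt so the instruction carries no value argument; canAdd/add then adjust only the offset half. A sketch of such an encoding (the 32/32 bit split is an assumption about the helper, which is defined elsewhere in the package):

package main

import "fmt"

// valAndOff: value in the high 32 bits, offset in the low 32 (assumed layout).
type valAndOff int64

func makeValAndOff(val, off int64) valAndOff {
	return valAndOff(val<<32 | off&0xffffffff)
}
func (x valAndOff) val() int64 { return int64(x) >> 32 }
func (x valAndOff) off() int64 { return int64(int32(x)) }
func (x valAndOff) add(off int64) valAndOff {
	// Only the offset half moves; the stored value is untouched.
	return makeValAndOff(x.val(), x.off()+off)
}

func main() {
	v := makeValAndOff(7, 16)
	fmt.Println(v.val(), v.off()) // 7 16
	fmt.Println(v.add(8).off())   // 24
}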
+ v.AddArg(ptr) + v.AddArg(idx) + v.AddArg(val) + v.AddArg(mem) + return true + } + // match: (MOVWstoreidx2 [c] {sym} ptr (ADDQconst [d] idx) val mem) + // cond: + // result: (MOVWstoreidx2 [c+2*d] {sym} ptr idx val mem) + for { + c := v.AuxInt + sym := v.Aux + ptr := v.Args[0] + if v.Args[1].Op != OpAMD64ADDQconst { + break + } + d := v.Args[1].AuxInt + idx := v.Args[1].Args[0] + val := v.Args[2] + mem := v.Args[3] + v.reset(OpAMD64MOVWstoreidx2) + v.AuxInt = c + 2*d + v.Aux = sym + v.AddArg(ptr) + v.AddArg(idx) + v.AddArg(val) + v.AddArg(mem) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64MULB(v *Value, config *Config) bool { + b := v.Block + _ = b + // match: (MULB x (MOVBconst [c])) + // cond: + // result: (MULBconst [c] x) + for { + x := v.Args[0] + if v.Args[1].Op != OpAMD64MOVBconst { + break + } + c := v.Args[1].AuxInt + v.reset(OpAMD64MULBconst) + v.AuxInt = c + v.AddArg(x) + return true + } + // match: (MULB (MOVBconst [c]) x) + // cond: + // result: (MULBconst [c] x) + for { + if v.Args[0].Op != OpAMD64MOVBconst { + break + } + c := v.Args[0].AuxInt + x := v.Args[1] + v.reset(OpAMD64MULBconst) + v.AuxInt = c + v.AddArg(x) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64MULBconst(v *Value, config *Config) bool { + b := v.Block + _ = b + // match: (MULBconst [c] (MOVBconst [d])) + // cond: + // result: (MOVBconst [c*d]) + for { + c := v.AuxInt + if v.Args[0].Op != OpAMD64MOVBconst { + break + } + d := v.Args[0].AuxInt + v.reset(OpAMD64MOVBconst) + v.AuxInt = c * d + return true + } + return false +} +func rewriteValueAMD64_OpAMD64MULL(v *Value, config *Config) bool { + b := v.Block + _ = b + // match: (MULL x (MOVLconst [c])) + // cond: + // result: (MULLconst [c] x) + for { + x := v.Args[0] + if v.Args[1].Op != OpAMD64MOVLconst { + break + } + c := v.Args[1].AuxInt + v.reset(OpAMD64MULLconst) + v.AuxInt = c + v.AddArg(x) + return true + } + // match: (MULL (MOVLconst [c]) x) + // cond: + // result: (MULLconst [c] x) + for { + if v.Args[0].Op != OpAMD64MOVLconst { + break + } + c := v.Args[0].AuxInt + x := v.Args[1] + v.reset(OpAMD64MULLconst) + v.AuxInt = c + v.AddArg(x) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64MULLconst(v *Value, config *Config) bool { + b := v.Block + _ = b + // match: (MULLconst [c] (MOVLconst [d])) + // cond: + // result: (MOVLconst [c*d]) + for { + c := v.AuxInt + if v.Args[0].Op != OpAMD64MOVLconst { + break + } + d := v.Args[0].AuxInt + v.reset(OpAMD64MOVLconst) + v.AuxInt = c * d + return true + } + return false +} +func rewriteValueAMD64_OpAMD64MULQ(v *Value, config *Config) bool { + b := v.Block + _ = b + // match: (MULQ x (MOVQconst [c])) + // cond: is32Bit(c) + // result: (MULQconst [c] x) + for { + x := v.Args[0] + if v.Args[1].Op != OpAMD64MOVQconst { + break + } + c := v.Args[1].AuxInt + if !(is32Bit(c)) { + break + } + v.reset(OpAMD64MULQconst) + v.AuxInt = c + v.AddArg(x) + return true + } + // match: (MULQ (MOVQconst [c]) x) + // cond: is32Bit(c) + // result: (MULQconst [c] x) + for { + if v.Args[0].Op != OpAMD64MOVQconst { + break + } + c := v.Args[0].AuxInt + x := v.Args[1] + if !(is32Bit(c)) { + break + } + v.reset(OpAMD64MULQconst) + v.AuxInt = c + v.AddArg(x) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64MULQconst(v *Value, config *Config) bool { + b := v.Block + _ = b + // match: (MULQconst [-1] x) + // cond: + // result: (NEGQ x) + for { + if v.AuxInt != -1 { + break + } + x := v.Args[0] + v.reset(OpAMD64NEGQ) + v.AddArg(x) + return true + } + // match: 
(MULQconst [0] _) + // cond: + // result: (MOVQconst [0]) + for { + if v.AuxInt != 0 { + break + } + v.reset(OpAMD64MOVQconst) + v.AuxInt = 0 + return true + } + // match: (MULQconst [1] x) + // cond: + // result: x + for { + if v.AuxInt != 1 { + break + } + x := v.Args[0] + v.reset(OpCopy) + v.Type = x.Type + v.AddArg(x) + return true + } + // match: (MULQconst [3] x) + // cond: + // result: (LEAQ2 x x) + for { + if v.AuxInt != 3 { + break + } + x := v.Args[0] + v.reset(OpAMD64LEAQ2) + v.AddArg(x) + v.AddArg(x) + return true + } + // match: (MULQconst [5] x) + // cond: + // result: (LEAQ4 x x) + for { + if v.AuxInt != 5 { + break + } + x := v.Args[0] + v.reset(OpAMD64LEAQ4) + v.AddArg(x) + v.AddArg(x) + return true + } + // match: (MULQconst [9] x) + // cond: + // result: (LEAQ8 x x) + for { + if v.AuxInt != 9 { + break + } + x := v.Args[0] + v.reset(OpAMD64LEAQ8) + v.AddArg(x) + v.AddArg(x) + return true + } + // match: (MULQconst [c] x) + // cond: isPowerOfTwo(c) + // result: (SHLQconst [log2(c)] x) + for { + c := v.AuxInt + x := v.Args[0] + if !(isPowerOfTwo(c)) { + break + } + v.reset(OpAMD64SHLQconst) + v.AuxInt = log2(c) + v.AddArg(x) + return true + } + // match: (MULQconst [c] (MOVQconst [d])) + // cond: + // result: (MOVQconst [c*d]) + for { + c := v.AuxInt + if v.Args[0].Op != OpAMD64MOVQconst { + break + } + d := v.Args[0].AuxInt + v.reset(OpAMD64MOVQconst) + v.AuxInt = c * d + return true + } + return false +} +func rewriteValueAMD64_OpAMD64MULW(v *Value, config *Config) bool { + b := v.Block + _ = b + // match: (MULW x (MOVWconst [c])) + // cond: + // result: (MULWconst [c] x) + for { + x := v.Args[0] + if v.Args[1].Op != OpAMD64MOVWconst { + break + } + c := v.Args[1].AuxInt + v.reset(OpAMD64MULWconst) + v.AuxInt = c + v.AddArg(x) + return true + } + // match: (MULW (MOVWconst [c]) x) + // cond: + // result: (MULWconst [c] x) + for { + if v.Args[0].Op != OpAMD64MOVWconst { + break + } + c := v.Args[0].AuxInt + x := v.Args[1] + v.reset(OpAMD64MULWconst) + v.AuxInt = c + v.AddArg(x) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64MULWconst(v *Value, config *Config) bool { + b := v.Block + _ = b + // match: (MULWconst [c] (MOVWconst [d])) + // cond: + // result: (MOVWconst [c*d]) + for { + c := v.AuxInt + if v.Args[0].Op != OpAMD64MOVWconst { + break + } + d := v.Args[0].AuxInt + v.reset(OpAMD64MOVWconst) + v.AuxInt = c * d + return true + } + return false +} +func rewriteValueAMD64_OpMod16(v *Value, config *Config) bool { + b := v.Block + _ = b + // match: (Mod16 x y) + // cond: + // result: (MODW x y) + for { + x := v.Args[0] + y := v.Args[1] + v.reset(OpAMD64MODW) + v.AddArg(x) + v.AddArg(y) + return true + } + return false +} +func rewriteValueAMD64_OpMod16u(v *Value, config *Config) bool { + b := v.Block + _ = b + // match: (Mod16u x y) + // cond: + // result: (MODWU x y) + for { + x := v.Args[0] + y := v.Args[1] + v.reset(OpAMD64MODWU) + v.AddArg(x) + v.AddArg(y) + return true + } + return false +} +func rewriteValueAMD64_OpMod32(v *Value, config *Config) bool { + b := v.Block + _ = b + // match: (Mod32 x y) + // cond: + // result: (MODL x y) + for { + x := v.Args[0] + y := v.Args[1] + v.reset(OpAMD64MODL) + v.AddArg(x) + v.AddArg(y) + return true + } + return false +} +func rewriteValueAMD64_OpMod32u(v *Value, config *Config) bool { + b := v.Block + _ = b + // match: (Mod32u x y) + // cond: + // result: (MODLU x y) + for { + x := v.Args[0] + y := v.Args[1] + v.reset(OpAMD64MODLU) + v.AddArg(x) + v.AddArg(y) + return true + } + return false +} +func 
rewriteValueAMD64_OpMod64(v *Value, config *Config) bool { + b := v.Block + _ = b + // match: (Mod64 x y) + // cond: + // result: (MODQ x y) + for { + x := v.Args[0] + y := v.Args[1] + v.reset(OpAMD64MODQ) + v.AddArg(x) + v.AddArg(y) + return true + } + return false +} +func rewriteValueAMD64_OpMod64u(v *Value, config *Config) bool { + b := v.Block + _ = b + // match: (Mod64u x y) + // cond: + // result: (MODQU x y) + for { + x := v.Args[0] + y := v.Args[1] + v.reset(OpAMD64MODQU) + v.AddArg(x) + v.AddArg(y) + return true + } + return false +} +func rewriteValueAMD64_OpMod8(v *Value, config *Config) bool { + b := v.Block + _ = b + // match: (Mod8 x y) + // cond: + // result: (MODW (SignExt8to16 x) (SignExt8to16 y)) + for { + x := v.Args[0] + y := v.Args[1] + v.reset(OpAMD64MODW) + v0 := b.NewValue0(v.Line, OpSignExt8to16, config.fe.TypeInt16()) + v0.AddArg(x) + v.AddArg(v0) + v1 := b.NewValue0(v.Line, OpSignExt8to16, config.fe.TypeInt16()) + v1.AddArg(y) + v.AddArg(v1) + return true + } + return false +} +func rewriteValueAMD64_OpMod8u(v *Value, config *Config) bool { + b := v.Block + _ = b + // match: (Mod8u x y) + // cond: + // result: (MODWU (ZeroExt8to16 x) (ZeroExt8to16 y)) + for { + x := v.Args[0] + y := v.Args[1] + v.reset(OpAMD64MODWU) + v0 := b.NewValue0(v.Line, OpZeroExt8to16, config.fe.TypeUInt16()) + v0.AddArg(x) + v.AddArg(v0) + v1 := b.NewValue0(v.Line, OpZeroExt8to16, config.fe.TypeUInt16()) + v1.AddArg(y) + v.AddArg(v1) + return true + } + return false +} +func rewriteValueAMD64_OpMove(v *Value, config *Config) bool { + b := v.Block + _ = b + // match: (Move [0] _ _ mem) + // cond: + // result: mem + for { + if v.AuxInt != 0 { + break + } + mem := v.Args[2] + v.reset(OpCopy) + v.Type = mem.Type + v.AddArg(mem) + return true + } + // match: (Move [1] dst src mem) + // cond: + // result: (MOVBstore dst (MOVBload src mem) mem) + for { + if v.AuxInt != 1 { + break + } + dst := v.Args[0] + src := v.Args[1] + mem := v.Args[2] + v.reset(OpAMD64MOVBstore) + v.AddArg(dst) + v0 := b.NewValue0(v.Line, OpAMD64MOVBload, config.fe.TypeUInt8()) + v0.AddArg(src) + v0.AddArg(mem) + v.AddArg(v0) + v.AddArg(mem) + return true + } + // match: (Move [2] dst src mem) + // cond: + // result: (MOVWstore dst (MOVWload src mem) mem) + for { + if v.AuxInt != 2 { + break + } + dst := v.Args[0] + src := v.Args[1] + mem := v.Args[2] + v.reset(OpAMD64MOVWstore) + v.AddArg(dst) + v0 := b.NewValue0(v.Line, OpAMD64MOVWload, config.fe.TypeUInt16()) + v0.AddArg(src) + v0.AddArg(mem) + v.AddArg(v0) + v.AddArg(mem) + return true + } + // match: (Move [4] dst src mem) + // cond: + // result: (MOVLstore dst (MOVLload src mem) mem) + for { + if v.AuxInt != 4 { + break + } + dst := v.Args[0] + src := v.Args[1] + mem := v.Args[2] + v.reset(OpAMD64MOVLstore) + v.AddArg(dst) + v0 := b.NewValue0(v.Line, OpAMD64MOVLload, config.fe.TypeUInt32()) + v0.AddArg(src) + v0.AddArg(mem) + v.AddArg(v0) + v.AddArg(mem) + return true + } + // match: (Move [8] dst src mem) + // cond: + // result: (MOVQstore dst (MOVQload src mem) mem) + for { + if v.AuxInt != 8 { + break + } + dst := v.Args[0] + src := v.Args[1] + mem := v.Args[2] + v.reset(OpAMD64MOVQstore) + v.AddArg(dst) + v0 := b.NewValue0(v.Line, OpAMD64MOVQload, config.fe.TypeUInt64()) + v0.AddArg(src) + v0.AddArg(mem) + v.AddArg(v0) + v.AddArg(mem) + return true + } + // match: (Move [16] dst src mem) + // cond: + // result: (MOVOstore dst (MOVOload src mem) mem) + for { + if v.AuxInt != 16 { + break + } + dst := v.Args[0] + src := v.Args[1] + mem := v.Args[2] + 
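// A 16-byte Move needs no pair of integer ops: MOVOload and MOVOstore + // perform a single 128-bit (int128) load and store. +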
v.reset(OpAMD64MOVOstore) + v.AddArg(dst) + v0 := b.NewValue0(v.Line, OpAMD64MOVOload, TypeInt128) + v0.AddArg(src) + v0.AddArg(mem) + v.AddArg(v0) + v.AddArg(mem) + return true + } + // match: (Move [3] dst src mem) + // cond: + // result: (MOVBstore [2] dst (MOVBload [2] src mem) (MOVWstore dst (MOVWload src mem) mem)) + for { + if v.AuxInt != 3 { + break + } + dst := v.Args[0] + src := v.Args[1] + mem := v.Args[2] + v.reset(OpAMD64MOVBstore) + v.AuxInt = 2 + v.AddArg(dst) + v0 := b.NewValue0(v.Line, OpAMD64MOVBload, config.fe.TypeUInt8()) + v0.AuxInt = 2 + v0.AddArg(src) + v0.AddArg(mem) + v.AddArg(v0) + v1 := b.NewValue0(v.Line, OpAMD64MOVWstore, TypeMem) + v1.AddArg(dst) + v2 := b.NewValue0(v.Line, OpAMD64MOVWload, config.fe.TypeUInt16()) + v2.AddArg(src) + v2.AddArg(mem) + v1.AddArg(v2) + v1.AddArg(mem) + v.AddArg(v1) + return true + } + // match: (Move [5] dst src mem) + // cond: + // result: (MOVBstore [4] dst (MOVBload [4] src mem) (MOVLstore dst (MOVLload src mem) mem)) + for { + if v.AuxInt != 5 { + break + } + dst := v.Args[0] + src := v.Args[1] + mem := v.Args[2] + v.reset(OpAMD64MOVBstore) + v.AuxInt = 4 + v.AddArg(dst) + v0 := b.NewValue0(v.Line, OpAMD64MOVBload, config.fe.TypeUInt8()) + v0.AuxInt = 4 + v0.AddArg(src) + v0.AddArg(mem) + v.AddArg(v0) + v1 := b.NewValue0(v.Line, OpAMD64MOVLstore, TypeMem) + v1.AddArg(dst) + v2 := b.NewValue0(v.Line, OpAMD64MOVLload, config.fe.TypeUInt32()) + v2.AddArg(src) + v2.AddArg(mem) + v1.AddArg(v2) + v1.AddArg(mem) + v.AddArg(v1) + return true + } + // match: (Move [6] dst src mem) + // cond: + // result: (MOVWstore [4] dst (MOVWload [4] src mem) (MOVLstore dst (MOVLload src mem) mem)) + for { + if v.AuxInt != 6 { + break + } + dst := v.Args[0] + src := v.Args[1] + mem := v.Args[2] + v.reset(OpAMD64MOVWstore) + v.AuxInt = 4 + v.AddArg(dst) + v0 := b.NewValue0(v.Line, OpAMD64MOVWload, config.fe.TypeUInt16()) + v0.AuxInt = 4 + v0.AddArg(src) + v0.AddArg(mem) + v.AddArg(v0) + v1 := b.NewValue0(v.Line, OpAMD64MOVLstore, TypeMem) + v1.AddArg(dst) + v2 := b.NewValue0(v.Line, OpAMD64MOVLload, config.fe.TypeUInt32()) + v2.AddArg(src) + v2.AddArg(mem) + v1.AddArg(v2) + v1.AddArg(mem) + v.AddArg(v1) + return true + } + // match: (Move [7] dst src mem) + // cond: + // result: (MOVLstore [3] dst (MOVLload [3] src mem) (MOVLstore dst (MOVLload src mem) mem)) + for { + if v.AuxInt != 7 { + break + } + dst := v.Args[0] + src := v.Args[1] + mem := v.Args[2] + v.reset(OpAMD64MOVLstore) + v.AuxInt = 3 + v.AddArg(dst) + v0 := b.NewValue0(v.Line, OpAMD64MOVLload, config.fe.TypeUInt32()) + v0.AuxInt = 3 + v0.AddArg(src) + v0.AddArg(mem) + v.AddArg(v0) + v1 := b.NewValue0(v.Line, OpAMD64MOVLstore, TypeMem) + v1.AddArg(dst) + v2 := b.NewValue0(v.Line, OpAMD64MOVLload, config.fe.TypeUInt32()) + v2.AddArg(src) + v2.AddArg(mem) + v1.AddArg(v2) + v1.AddArg(mem) + v.AddArg(v1) + return true + } + // match: (Move [size] dst src mem) + // cond: size > 8 && size < 16 + // result: (MOVQstore [size-8] dst (MOVQload [size-8] src mem) (MOVQstore dst (MOVQload src mem) mem)) + for { + size := v.AuxInt + dst := v.Args[0] + src := v.Args[1] + mem := v.Args[2] + if !(size > 8 && size < 16) { + break + } + v.reset(OpAMD64MOVQstore) + v.AuxInt = size - 8 + v.AddArg(dst) + v0 := b.NewValue0(v.Line, OpAMD64MOVQload, config.fe.TypeUInt64()) + v0.AuxInt = size - 8 + v0.AddArg(src) + v0.AddArg(mem) + v.AddArg(v0) + v1 := b.NewValue0(v.Line, OpAMD64MOVQstore, TypeMem) + v1.AddArg(dst) + v2 := b.NewValue0(v.Line, OpAMD64MOVQload, config.fe.TypeUInt64()) + v2.AddArg(src) + 
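// Sizes 9 through 15 are covered by two possibly overlapping 8-byte moves, + // one at offset 0 and one at size-8, avoiding any byte-sized tail copies. +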
v2.AddArg(mem) + v1.AddArg(v2) + v1.AddArg(mem) + v.AddArg(v1) + return true + } + // match: (Move [size] dst src mem) + // cond: size > 16 && size%16 != 0 && size%16 <= 8 + // result: (Move [size-size%16] (ADDQconst <dst.Type> dst [size%16]) (ADDQconst <src.Type> src [size%16]) (MOVQstore dst (MOVQload src mem) mem)) + for { + size := v.AuxInt + dst := v.Args[0] + src := v.Args[1] + mem := v.Args[2] + if !(size > 16 && size%16 != 0 && size%16 <= 8) { + break + } + v.reset(OpMove) + v.AuxInt = size - size%16 + v0 := b.NewValue0(v.Line, OpAMD64ADDQconst, dst.Type) + v0.AddArg(dst) + v0.AuxInt = size % 16 + v.AddArg(v0) + v1 := b.NewValue0(v.Line, OpAMD64ADDQconst, src.Type) + v1.AddArg(src) + v1.AuxInt = size % 16 + v.AddArg(v1) + v2 := b.NewValue0(v.Line, OpAMD64MOVQstore, TypeMem) + v2.AddArg(dst) + v3 := b.NewValue0(v.Line, OpAMD64MOVQload, config.fe.TypeUInt64()) + v3.AddArg(src) + v3.AddArg(mem) + v2.AddArg(v3) + v2.AddArg(mem) + v.AddArg(v2) + return true + } + // match: (Move [size] dst src mem) + // cond: size > 16 && size%16 != 0 && size%16 > 8 + // result: (Move [size-size%16] (ADDQconst <dst.Type> dst [size%16]) (ADDQconst <src.Type> src [size%16]) (MOVOstore dst (MOVOload src mem) mem)) + for { + size := v.AuxInt + dst := v.Args[0] + src := v.Args[1] + mem := v.Args[2] + if !(size > 16 && size%16 != 0 && size%16 > 8) { + break + } + v.reset(OpMove) + v.AuxInt = size - size%16 + v0 := b.NewValue0(v.Line, OpAMD64ADDQconst, dst.Type) + v0.AddArg(dst) + v0.AuxInt = size % 16 + v.AddArg(v0) + v1 := b.NewValue0(v.Line, OpAMD64ADDQconst, src.Type) + v1.AddArg(src) + v1.AuxInt = size % 16 + v.AddArg(v1) + v2 := b.NewValue0(v.Line, OpAMD64MOVOstore, TypeMem) + v2.AddArg(dst) + v3 := b.NewValue0(v.Line, OpAMD64MOVOload, TypeInt128) + v3.AddArg(src) + v3.AddArg(mem) + v2.AddArg(v3) + v2.AddArg(mem) + v.AddArg(v2) + return true + } + // match: (Move [size] dst src mem) + // cond: size >= 32 && size <= 16*64 && size%16 == 0 + // result: (DUFFCOPY [14*(64-size/16)] dst src mem) + for { + size := v.AuxInt + dst := v.Args[0] + src := v.Args[1] + mem := v.Args[2] + if !(size >= 32 && size <= 16*64 && size%16 == 0) { + break + } + v.reset(OpAMD64DUFFCOPY) + v.AuxInt = 14 * (64 - size/16) + v.AddArg(dst) + v.AddArg(src) + v.AddArg(mem) + return true + } + // match: (Move [size] dst src mem) + // cond: size > 16*64 && size%8 == 0 + // result: (REPMOVSQ dst src (MOVQconst [size/8]) mem) + for { + size := v.AuxInt + dst := v.Args[0] + src := v.Args[1] + mem := v.Args[2] + if !(size > 16*64 && size%8 == 0) { + break + } + v.reset(OpAMD64REPMOVSQ) + v.AddArg(dst) + v.AddArg(src) + v0 := b.NewValue0(v.Line, OpAMD64MOVQconst, config.fe.TypeUInt64()) + v0.AuxInt = size / 8 + v.AddArg(v0) + v.AddArg(mem) + return true + } + return false +} +func rewriteValueAMD64_OpMul16(v *Value, config *Config) bool { + b := v.Block + _ = b + // match: (Mul16 x y) + // cond: + // result: (MULW x y) + for { + x := v.Args[0] + y := v.Args[1] + v.reset(OpAMD64MULW) + v.AddArg(x) + v.AddArg(y) + return true + } + return false +} +func rewriteValueAMD64_OpMul32(v *Value, config *Config) bool { + b := v.Block + _ = b + // match: (Mul32 x y) + // cond: + // result: (MULL x y) + for { + x := v.Args[0] + y := v.Args[1] + v.reset(OpAMD64MULL) + v.AddArg(x) + v.AddArg(y) + return true + } + return false +} +func rewriteValueAMD64_OpMul32F(v *Value, config *Config) bool { + b := v.Block + _ = b + // match: (Mul32F x y) + // cond: + // result: (MULSS x y) + for { + x := v.Args[0] + y := v.Args[1] + v.reset(OpAMD64MULSS) + v.AddArg(x) 
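+ // Mul32F and Mul64F lower directly to the scalar SSE multiplies + // MULSS and MULSD; no further rewriting is done on them here.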
+ v.AddArg(y) + return true + } + return false +} +func rewriteValueAMD64_OpMul64(v *Value, config *Config) bool { + b := v.Block + _ = b + // match: (Mul64 x y) + // cond: + // result: (MULQ x y) + for { + x := v.Args[0] + y := v.Args[1] + v.reset(OpAMD64MULQ) + v.AddArg(x) + v.AddArg(y) + return true + } + return false +} +func rewriteValueAMD64_OpMul64F(v *Value, config *Config) bool { + b := v.Block + _ = b + // match: (Mul64F x y) + // cond: + // result: (MULSD x y) + for { + x := v.Args[0] + y := v.Args[1] + v.reset(OpAMD64MULSD) + v.AddArg(x) + v.AddArg(y) + return true + } + return false +} +func rewriteValueAMD64_OpMul8(v *Value, config *Config) bool { + b := v.Block + _ = b + // match: (Mul8 x y) + // cond: + // result: (MULB x y) + for { + x := v.Args[0] + y := v.Args[1] + v.reset(OpAMD64MULB) + v.AddArg(x) + v.AddArg(y) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64NEGB(v *Value, config *Config) bool { + b := v.Block + _ = b + // match: (NEGB (MOVBconst [c])) + // cond: + // result: (MOVBconst [-c]) + for { + if v.Args[0].Op != OpAMD64MOVBconst { + break + } + c := v.Args[0].AuxInt + v.reset(OpAMD64MOVBconst) + v.AuxInt = -c + return true + } + return false +} +func rewriteValueAMD64_OpAMD64NEGL(v *Value, config *Config) bool { + b := v.Block + _ = b + // match: (NEGL (MOVLconst [c])) + // cond: + // result: (MOVLconst [-c]) + for { + if v.Args[0].Op != OpAMD64MOVLconst { + break + } + c := v.Args[0].AuxInt + v.reset(OpAMD64MOVLconst) + v.AuxInt = -c + return true + } + return false +} +func rewriteValueAMD64_OpAMD64NEGQ(v *Value, config *Config) bool { + b := v.Block + _ = b + // match: (NEGQ (MOVQconst [c])) + // cond: + // result: (MOVQconst [-c]) + for { + if v.Args[0].Op != OpAMD64MOVQconst { + break + } + c := v.Args[0].AuxInt + v.reset(OpAMD64MOVQconst) + v.AuxInt = -c + return true + } + return false +} +func rewriteValueAMD64_OpAMD64NEGW(v *Value, config *Config) bool { + b := v.Block + _ = b + // match: (NEGW (MOVWconst [c])) + // cond: + // result: (MOVWconst [-c]) + for { + if v.Args[0].Op != OpAMD64MOVWconst { + break + } + c := v.Args[0].AuxInt + v.reset(OpAMD64MOVWconst) + v.AuxInt = -c + return true + } + return false +} +func rewriteValueAMD64_OpAMD64NOTB(v *Value, config *Config) bool { + b := v.Block + _ = b + // match: (NOTB (MOVBconst [c])) + // cond: + // result: (MOVBconst [^c]) + for { + if v.Args[0].Op != OpAMD64MOVBconst { + break + } + c := v.Args[0].AuxInt + v.reset(OpAMD64MOVBconst) + v.AuxInt = ^c + return true + } + return false +} +func rewriteValueAMD64_OpAMD64NOTL(v *Value, config *Config) bool { + b := v.Block + _ = b + // match: (NOTL (MOVLconst [c])) + // cond: + // result: (MOVLconst [^c]) + for { + if v.Args[0].Op != OpAMD64MOVLconst { + break + } + c := v.Args[0].AuxInt + v.reset(OpAMD64MOVLconst) + v.AuxInt = ^c + return true + } + return false +} +func rewriteValueAMD64_OpAMD64NOTQ(v *Value, config *Config) bool { + b := v.Block + _ = b + // match: (NOTQ (MOVQconst [c])) + // cond: + // result: (MOVQconst [^c]) + for { + if v.Args[0].Op != OpAMD64MOVQconst { + break + } + c := v.Args[0].AuxInt + v.reset(OpAMD64MOVQconst) + v.AuxInt = ^c + return true + } + return false +} +func rewriteValueAMD64_OpAMD64NOTW(v *Value, config *Config) bool { + b := v.Block + _ = b + // match: (NOTW (MOVWconst [c])) + // cond: + // result: (MOVWconst [^c]) + for { + if v.Args[0].Op != OpAMD64MOVWconst { + break + } + c := v.Args[0].AuxInt + v.reset(OpAMD64MOVWconst) + v.AuxInt = ^c + return true + } + return false +} +func 
rewriteValueAMD64_OpNeg16(v *Value, config *Config) bool { + b := v.Block + _ = b + // match: (Neg16 x) + // cond: + // result: (NEGW x) + for { + x := v.Args[0] + v.reset(OpAMD64NEGW) + v.AddArg(x) + return true + } + return false +} +func rewriteValueAMD64_OpNeg32(v *Value, config *Config) bool { + b := v.Block + _ = b + // match: (Neg32 x) + // cond: + // result: (NEGL x) + for { + x := v.Args[0] + v.reset(OpAMD64NEGL) + v.AddArg(x) + return true + } + return false +} +func rewriteValueAMD64_OpNeg32F(v *Value, config *Config) bool { + b := v.Block + _ = b + // match: (Neg32F x) + // cond: + // result: (PXOR x (MOVSSconst <config.Frontend().TypeFloat32()> [f2i(math.Copysign(0, -1))])) + for { + x := v.Args[0] + v.reset(OpAMD64PXOR) + v.AddArg(x) + v0 := b.NewValue0(v.Line, OpAMD64MOVSSconst, config.Frontend().TypeFloat32()) + v0.AuxInt = f2i(math.Copysign(0, -1)) + v.AddArg(v0) + return true + } + return false +} +func rewriteValueAMD64_OpNeg64(v *Value, config *Config) bool { + b := v.Block + _ = b + // match: (Neg64 x) + // cond: + // result: (NEGQ x) + for { + x := v.Args[0] + v.reset(OpAMD64NEGQ) + v.AddArg(x) + return true + } + return false +} +func rewriteValueAMD64_OpNeg64F(v *Value, config *Config) bool { + b := v.Block + _ = b + // match: (Neg64F x) + // cond: + // result: (PXOR x (MOVSDconst <config.Frontend().TypeFloat64()> [f2i(math.Copysign(0, -1))])) + for { + x := v.Args[0] + v.reset(OpAMD64PXOR) + v.AddArg(x) + v0 := b.NewValue0(v.Line, OpAMD64MOVSDconst, config.Frontend().TypeFloat64()) + v0.AuxInt = f2i(math.Copysign(0, -1)) + v.AddArg(v0) + return true + } + return false +} +func rewriteValueAMD64_OpNeg8(v *Value, config *Config) bool { + b := v.Block + _ = b + // match: (Neg8 x) + // cond: + // result: (NEGB x) + for { + x := v.Args[0] + v.reset(OpAMD64NEGB) + v.AddArg(x) + return true + } + return false +} +func rewriteValueAMD64_OpNeq16(v *Value, config *Config) bool { + b := v.Block + _ = b + // match: (Neq16 x y) + // cond: + // result: (SETNE (CMPW x y)) + for { + x := v.Args[0] + y := v.Args[1] + v.reset(OpAMD64SETNE) + v0 := b.NewValue0(v.Line, OpAMD64CMPW, TypeFlags) + v0.AddArg(x) + v0.AddArg(y) + v.AddArg(v0) + return true + } + return false +} +func rewriteValueAMD64_OpNeq32(v *Value, config *Config) bool { + b := v.Block + _ = b + // match: (Neq32 x y) + // cond: + // result: (SETNE (CMPL x y)) + for { + x := v.Args[0] + y := v.Args[1] + v.reset(OpAMD64SETNE) + v0 := b.NewValue0(v.Line, OpAMD64CMPL, TypeFlags) + v0.AddArg(x) + v0.AddArg(y) + v.AddArg(v0) + return true + } + return false +} +func rewriteValueAMD64_OpNeq32F(v *Value, config *Config) bool { + b := v.Block + _ = b + // match: (Neq32F x y) + // cond: + // result: (SETNEF (UCOMISS x y)) + for { + x := v.Args[0] + y := v.Args[1] + v.reset(OpAMD64SETNEF) + v0 := b.NewValue0(v.Line, OpAMD64UCOMISS, TypeFlags) + v0.AddArg(x) + v0.AddArg(y) + v.AddArg(v0) + return true + } + return false +} +func rewriteValueAMD64_OpNeq64(v *Value, config *Config) bool { + b := v.Block + _ = b + // match: (Neq64 x y) + // cond: + // result: (SETNE (CMPQ x y)) + for { + x := v.Args[0] + y := v.Args[1] + v.reset(OpAMD64SETNE) + v0 := b.NewValue0(v.Line, OpAMD64CMPQ, TypeFlags) + v0.AddArg(x) + v0.AddArg(y) + v.AddArg(v0) + return true + } + return false +} +func rewriteValueAMD64_OpNeq64F(v *Value, config *Config) bool { + b := v.Block + _ = b + // match: (Neq64F x y) + // cond: + // result: (SETNEF (UCOMISD x y)) + for { + x := v.Args[0] + y := v.Args[1] + v.reset(OpAMD64SETNEF) + v0 := b.NewValue0(v.Line, 
OpAMD64UCOMISD, TypeFlags) + v0.AddArg(x) + v0.AddArg(y) + v.AddArg(v0) + return true + } + return false +} +func rewriteValueAMD64_OpNeq8(v *Value, config *Config) bool { + b := v.Block + _ = b + // match: (Neq8 x y) + // cond: + // result: (SETNE (CMPB x y)) + for { + x := v.Args[0] + y := v.Args[1] + v.reset(OpAMD64SETNE) + v0 := b.NewValue0(v.Line, OpAMD64CMPB, TypeFlags) + v0.AddArg(x) + v0.AddArg(y) + v.AddArg(v0) + return true + } + return false +} +func rewriteValueAMD64_OpNeqPtr(v *Value, config *Config) bool { + b := v.Block + _ = b + // match: (NeqPtr x y) + // cond: + // result: (SETNE (CMPQ x y)) + for { + x := v.Args[0] + y := v.Args[1] + v.reset(OpAMD64SETNE) + v0 := b.NewValue0(v.Line, OpAMD64CMPQ, TypeFlags) + v0.AddArg(x) + v0.AddArg(y) + v.AddArg(v0) + return true + } + return false +} +func rewriteValueAMD64_OpNilCheck(v *Value, config *Config) bool { + b := v.Block + _ = b + // match: (NilCheck ptr mem) + // cond: + // result: (LoweredNilCheck ptr mem) + for { + ptr := v.Args[0] + mem := v.Args[1] + v.reset(OpAMD64LoweredNilCheck) + v.AddArg(ptr) + v.AddArg(mem) + return true + } + return false +} +func rewriteValueAMD64_OpNot(v *Value, config *Config) bool { + b := v.Block + _ = b + // match: (Not x) + // cond: + // result: (XORBconst [1] x) + for { + x := v.Args[0] + v.reset(OpAMD64XORBconst) + v.AuxInt = 1 + v.AddArg(x) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64ORB(v *Value, config *Config) bool { + b := v.Block + _ = b + // match: (ORB x (MOVBconst [c])) + // cond: + // result: (ORBconst [c] x) + for { + x := v.Args[0] + if v.Args[1].Op != OpAMD64MOVBconst { + break + } + c := v.Args[1].AuxInt + v.reset(OpAMD64ORBconst) + v.AuxInt = c + v.AddArg(x) + return true + } + // match: (ORB (MOVBconst [c]) x) + // cond: + // result: (ORBconst [c] x) + for { + if v.Args[0].Op != OpAMD64MOVBconst { + break + } + c := v.Args[0].AuxInt + x := v.Args[1] + v.reset(OpAMD64ORBconst) + v.AuxInt = c + v.AddArg(x) + return true + } + // match: (ORB x x) + // cond: + // result: x + for { + x := v.Args[0] + if v.Args[1] != x { + break + } + v.reset(OpCopy) + v.Type = x.Type + v.AddArg(x) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64ORBconst(v *Value, config *Config) bool { + b := v.Block + _ = b + // match: (ORBconst [c] x) + // cond: int8(c)==0 + // result: x + for { + c := v.AuxInt + x := v.Args[0] + if !(int8(c) == 0) { + break + } + v.reset(OpCopy) + v.Type = x.Type + v.AddArg(x) + return true + } + // match: (ORBconst [c] _) + // cond: int8(c)==-1 + // result: (MOVBconst [-1]) + for { + c := v.AuxInt + if !(int8(c) == -1) { + break + } + v.reset(OpAMD64MOVBconst) + v.AuxInt = -1 + return true + } + // match: (ORBconst [c] (MOVBconst [d])) + // cond: + // result: (MOVBconst [c|d]) + for { + c := v.AuxInt + if v.Args[0].Op != OpAMD64MOVBconst { + break + } + d := v.Args[0].AuxInt + v.reset(OpAMD64MOVBconst) + v.AuxInt = c | d + return true + } + return false +} +func rewriteValueAMD64_OpAMD64ORL(v *Value, config *Config) bool { + b := v.Block + _ = b + // match: (ORL x (MOVLconst [c])) + // cond: + // result: (ORLconst [c] x) + for { + x := v.Args[0] + if v.Args[1].Op != OpAMD64MOVLconst { + break + } + c := v.Args[1].AuxInt + v.reset(OpAMD64ORLconst) + v.AuxInt = c + v.AddArg(x) + return true + } + // match: (ORL (MOVLconst [c]) x) + // cond: + // result: (ORLconst [c] x) + for { + if v.Args[0].Op != OpAMD64MOVLconst { + break + } + c := v.Args[0].AuxInt + x := v.Args[1] + v.reset(OpAMD64ORLconst) + v.AuxInt = c + v.AddArg(x) + return 
true + } + // match: (ORL x x) + // cond: + // result: x + for { + x := v.Args[0] + if v.Args[1] != x { + break + } + v.reset(OpCopy) + v.Type = x.Type + v.AddArg(x) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64ORLconst(v *Value, config *Config) bool { + b := v.Block + _ = b + // match: (ORLconst [c] x) + // cond: int32(c)==0 + // result: x + for { + c := v.AuxInt + x := v.Args[0] + if !(int32(c) == 0) { + break + } + v.reset(OpCopy) + v.Type = x.Type + v.AddArg(x) + return true + } + // match: (ORLconst [c] _) + // cond: int32(c)==-1 + // result: (MOVLconst [-1]) + for { + c := v.AuxInt + if !(int32(c) == -1) { + break + } + v.reset(OpAMD64MOVLconst) + v.AuxInt = -1 + return true + } + // match: (ORLconst [c] (MOVLconst [d])) + // cond: + // result: (MOVLconst [c|d]) + for { + c := v.AuxInt + if v.Args[0].Op != OpAMD64MOVLconst { + break + } + d := v.Args[0].AuxInt + v.reset(OpAMD64MOVLconst) + v.AuxInt = c | d + return true + } + return false +} +func rewriteValueAMD64_OpAMD64ORQ(v *Value, config *Config) bool { + b := v.Block + _ = b + // match: (ORQ x (MOVQconst [c])) + // cond: is32Bit(c) + // result: (ORQconst [c] x) + for { + x := v.Args[0] + if v.Args[1].Op != OpAMD64MOVQconst { + break + } + c := v.Args[1].AuxInt + if !(is32Bit(c)) { + break + } + v.reset(OpAMD64ORQconst) + v.AuxInt = c + v.AddArg(x) + return true + } + // match: (ORQ (MOVQconst [c]) x) + // cond: is32Bit(c) + // result: (ORQconst [c] x) + for { + if v.Args[0].Op != OpAMD64MOVQconst { + break + } + c := v.Args[0].AuxInt + x := v.Args[1] + if !(is32Bit(c)) { + break + } + v.reset(OpAMD64ORQconst) + v.AuxInt = c + v.AddArg(x) + return true + } + // match: (ORQ x x) + // cond: + // result: x + for { + x := v.Args[0] + if v.Args[1] != x { + break + } + v.reset(OpCopy) + v.Type = x.Type + v.AddArg(x) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64ORQconst(v *Value, config *Config) bool { + b := v.Block + _ = b + // match: (ORQconst [0] x) + // cond: + // result: x + for { + if v.AuxInt != 0 { + break + } + x := v.Args[0] + v.reset(OpCopy) + v.Type = x.Type + v.AddArg(x) + return true + } + // match: (ORQconst [-1] _) + // cond: + // result: (MOVQconst [-1]) + for { + if v.AuxInt != -1 { + break + } + v.reset(OpAMD64MOVQconst) + v.AuxInt = -1 + return true + } + // match: (ORQconst [c] (MOVQconst [d])) + // cond: + // result: (MOVQconst [c|d]) + for { + c := v.AuxInt + if v.Args[0].Op != OpAMD64MOVQconst { + break + } + d := v.Args[0].AuxInt + v.reset(OpAMD64MOVQconst) + v.AuxInt = c | d + return true + } + return false +} +func rewriteValueAMD64_OpAMD64ORW(v *Value, config *Config) bool { + b := v.Block + _ = b + // match: (ORW x (MOVWconst [c])) + // cond: + // result: (ORWconst [c] x) + for { + x := v.Args[0] + if v.Args[1].Op != OpAMD64MOVWconst { + break + } + c := v.Args[1].AuxInt + v.reset(OpAMD64ORWconst) + v.AuxInt = c + v.AddArg(x) + return true + } + // match: (ORW (MOVWconst [c]) x) + // cond: + // result: (ORWconst [c] x) + for { + if v.Args[0].Op != OpAMD64MOVWconst { + break + } + c := v.Args[0].AuxInt + x := v.Args[1] + v.reset(OpAMD64ORWconst) + v.AuxInt = c + v.AddArg(x) + return true + } + // match: (ORW x x) + // cond: + // result: x + for { + x := v.Args[0] + if v.Args[1] != x { + break + } + v.reset(OpCopy) + v.Type = x.Type + v.AddArg(x) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64ORWconst(v *Value, config *Config) bool { + b := v.Block + _ = b + // match: (ORWconst [c] x) + // cond: int16(c)==0 + // result: x + for { + c := v.AuxInt 
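+ // The condition casts to int16 because AuxInt is a 64-bit field; only the + // low 16 bits are significant for a word-sized OR, so int16(c)==0 is the + // right identity test rather than c==0.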
+ x := v.Args[0] + if !(int16(c) == 0) { + break + } + v.reset(OpCopy) + v.Type = x.Type + v.AddArg(x) + return true + } + // match: (ORWconst [c] _) + // cond: int16(c)==-1 + // result: (MOVWconst [-1]) + for { + c := v.AuxInt + if !(int16(c) == -1) { + break + } + v.reset(OpAMD64MOVWconst) + v.AuxInt = -1 + return true + } + // match: (ORWconst [c] (MOVWconst [d])) + // cond: + // result: (MOVWconst [c|d]) + for { + c := v.AuxInt + if v.Args[0].Op != OpAMD64MOVWconst { + break + } + d := v.Args[0].AuxInt + v.reset(OpAMD64MOVWconst) + v.AuxInt = c | d + return true + } + return false +} +func rewriteValueAMD64_OpOffPtr(v *Value, config *Config) bool { + b := v.Block + _ = b + // match: (OffPtr [off] ptr) + // cond: + // result: (ADDQconst [off] ptr) + for { + off := v.AuxInt + ptr := v.Args[0] + v.reset(OpAMD64ADDQconst) + v.AuxInt = off + v.AddArg(ptr) + return true + } + return false +} +func rewriteValueAMD64_OpOr16(v *Value, config *Config) bool { + b := v.Block + _ = b + // match: (Or16 x y) + // cond: + // result: (ORW x y) + for { + x := v.Args[0] + y := v.Args[1] + v.reset(OpAMD64ORW) + v.AddArg(x) + v.AddArg(y) + return true + } + return false +} +func rewriteValueAMD64_OpOr32(v *Value, config *Config) bool { + b := v.Block + _ = b + // match: (Or32 x y) + // cond: + // result: (ORL x y) + for { + x := v.Args[0] + y := v.Args[1] + v.reset(OpAMD64ORL) + v.AddArg(x) + v.AddArg(y) + return true + } + return false +} +func rewriteValueAMD64_OpOr64(v *Value, config *Config) bool { + b := v.Block + _ = b + // match: (Or64 x y) + // cond: + // result: (ORQ x y) + for { + x := v.Args[0] + y := v.Args[1] + v.reset(OpAMD64ORQ) + v.AddArg(x) + v.AddArg(y) + return true + } + return false +} +func rewriteValueAMD64_OpOr8(v *Value, config *Config) bool { + b := v.Block + _ = b + // match: (Or8 x y) + // cond: + // result: (ORB x y) + for { + x := v.Args[0] + y := v.Args[1] + v.reset(OpAMD64ORB) + v.AddArg(x) + v.AddArg(y) + return true + } + return false +} +func rewriteValueAMD64_OpRsh16Ux16(v *Value, config *Config) bool { + b := v.Block + _ = b + // match: (Rsh16Ux16 <t> x y) + // cond: + // result: (ANDW (SHRW <t> x y) (SBBLcarrymask <t> (CMPWconst y [16]))) + for { + t := v.Type + x := v.Args[0] + y := v.Args[1] + v.reset(OpAMD64ANDW) + v0 := b.NewValue0(v.Line, OpAMD64SHRW, t) + v0.AddArg(x) + v0.AddArg(y) + v.AddArg(v0) + v1 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, t) + v2 := b.NewValue0(v.Line, OpAMD64CMPWconst, TypeFlags) + v2.AddArg(y) + v2.AuxInt = 16 + v1.AddArg(v2) + v.AddArg(v1) + return true + } + return false +} +func rewriteValueAMD64_OpRsh16Ux32(v *Value, config *Config) bool { + b := v.Block + _ = b + // match: (Rsh16Ux32 <t> x y) + // cond: + // result: (ANDW (SHRW <t> x y) (SBBLcarrymask <t> (CMPLconst y [16]))) + for { + t := v.Type + x := v.Args[0] + y := v.Args[1] + v.reset(OpAMD64ANDW) + v0 := b.NewValue0(v.Line, OpAMD64SHRW, t) + v0.AddArg(x) + v0.AddArg(y) + v.AddArg(v0) + v1 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, t) + v2 := b.NewValue0(v.Line, OpAMD64CMPLconst, TypeFlags) + v2.AddArg(y) + v2.AuxInt = 16 + v1.AddArg(v2) + v.AddArg(v1) + return true + } + return false +} +func rewriteValueAMD64_OpRsh16Ux64(v *Value, config *Config) bool { + b := v.Block + _ = b + // match: (Rsh16Ux64 <t> x y) + // cond: + // result: (ANDW (SHRW <t> x y) (SBBLcarrymask <t> (CMPQconst y [16]))) + for { + t := v.Type + x := v.Args[0] + y := v.Args[1] + v.reset(OpAMD64ANDW) + v0 := b.NewValue0(v.Line, OpAMD64SHRW, t) + v0.AddArg(x) + v0.AddArg(y) + v.AddArg(v0) + v1 := 
b.NewValue0(v.Line, OpAMD64SBBLcarrymask, t) + v2 := b.NewValue0(v.Line, OpAMD64CMPQconst, TypeFlags) + v2.AddArg(y) + v2.AuxInt = 16 + v1.AddArg(v2) + v.AddArg(v1) + return true + } + return false +} +func rewriteValueAMD64_OpRsh16Ux8(v *Value, config *Config) bool { + b := v.Block + _ = b + // match: (Rsh16Ux8 <t> x y) + // cond: + // result: (ANDW (SHRW <t> x y) (SBBLcarrymask <t> (CMPBconst y [16]))) + for { + t := v.Type + x := v.Args[0] + y := v.Args[1] + v.reset(OpAMD64ANDW) + v0 := b.NewValue0(v.Line, OpAMD64SHRW, t) + v0.AddArg(x) + v0.AddArg(y) + v.AddArg(v0) + v1 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, t) + v2 := b.NewValue0(v.Line, OpAMD64CMPBconst, TypeFlags) + v2.AddArg(y) + v2.AuxInt = 16 + v1.AddArg(v2) + v.AddArg(v1) + return true + } + return false +} +func rewriteValueAMD64_OpRsh16x16(v *Value, config *Config) bool { + b := v.Block + _ = b + // match: (Rsh16x16 <t> x y) + // cond: + // result: (SARW <t> x (ORW <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPWconst y [16]))))) + for { + t := v.Type + x := v.Args[0] + y := v.Args[1] + v.reset(OpAMD64SARW) + v.Type = t + v.AddArg(x) + v0 := b.NewValue0(v.Line, OpAMD64ORW, y.Type) + v0.AddArg(y) + v1 := b.NewValue0(v.Line, OpAMD64NOTL, y.Type) + v2 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, y.Type) + v3 := b.NewValue0(v.Line, OpAMD64CMPWconst, TypeFlags) + v3.AddArg(y) + v3.AuxInt = 16 + v2.AddArg(v3) + v1.AddArg(v2) + v0.AddArg(v1) + v.AddArg(v0) + return true + } + return false +} +func rewriteValueAMD64_OpRsh16x32(v *Value, config *Config) bool { + b := v.Block + _ = b + // match: (Rsh16x32 <t> x y) + // cond: + // result: (SARW <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPLconst y [16]))))) + for { + t := v.Type + x := v.Args[0] + y := v.Args[1] + v.reset(OpAMD64SARW) + v.Type = t + v.AddArg(x) + v0 := b.NewValue0(v.Line, OpAMD64ORL, y.Type) + v0.AddArg(y) + v1 := b.NewValue0(v.Line, OpAMD64NOTL, y.Type) + v2 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, y.Type) + v3 := b.NewValue0(v.Line, OpAMD64CMPLconst, TypeFlags) + v3.AddArg(y) + v3.AuxInt = 16 + v2.AddArg(v3) + v1.AddArg(v2) + v0.AddArg(v1) + v.AddArg(v0) + return true + } + return false +} +func rewriteValueAMD64_OpRsh16x64(v *Value, config *Config) bool { + b := v.Block + _ = b + // match: (Rsh16x64 <t> x y) + // cond: + // result: (SARW <t> x (ORQ <y.Type> y (NOTQ <y.Type> (SBBQcarrymask <y.Type> (CMPQconst y [16]))))) + for { + t := v.Type + x := v.Args[0] + y := v.Args[1] + v.reset(OpAMD64SARW) + v.Type = t + v.AddArg(x) + v0 := b.NewValue0(v.Line, OpAMD64ORQ, y.Type) + v0.AddArg(y) + v1 := b.NewValue0(v.Line, OpAMD64NOTQ, y.Type) + v2 := b.NewValue0(v.Line, OpAMD64SBBQcarrymask, y.Type) + v3 := b.NewValue0(v.Line, OpAMD64CMPQconst, TypeFlags) + v3.AddArg(y) + v3.AuxInt = 16 + v2.AddArg(v3) + v1.AddArg(v2) + v0.AddArg(v1) + v.AddArg(v0) + return true + } + return false +} +func rewriteValueAMD64_OpRsh16x8(v *Value, config *Config) bool { + b := v.Block + _ = b + // match: (Rsh16x8 <t> x y) + // cond: + // result: (SARW <t> x (ORB <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPBconst y [16]))))) + for { + t := v.Type + x := v.Args[0] + y := v.Args[1] + v.reset(OpAMD64SARW) + v.Type = t + v.AddArg(x) + v0 := b.NewValue0(v.Line, OpAMD64ORB, y.Type) + v0.AddArg(y) + v1 := b.NewValue0(v.Line, OpAMD64NOTL, y.Type) + v2 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, y.Type) + v3 := b.NewValue0(v.Line, OpAMD64CMPBconst, TypeFlags) + v3.AddArg(y) + v3.AuxInt = 16 + v2.AddArg(v3) + v1.AddArg(v2) + v0.AddArg(v1) + v.AddArg(v0) + 
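// For signed shifts the result must stay sign-filled when y >= width, so + // the ORx/NOTx/SBBxcarrymask chain above rewrites any out-of-range count + // to all ones; the SAR then saturates at a full sign-extending shift. +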
return true + } + return false +} +func rewriteValueAMD64_OpRsh32Ux16(v *Value, config *Config) bool { + b := v.Block + _ = b + // match: (Rsh32Ux16 <t> x y) + // cond: + // result: (ANDL (SHRL <t> x y) (SBBLcarrymask <t> (CMPWconst y [32]))) + for { + t := v.Type + x := v.Args[0] + y := v.Args[1] + v.reset(OpAMD64ANDL) + v0 := b.NewValue0(v.Line, OpAMD64SHRL, t) + v0.AddArg(x) + v0.AddArg(y) + v.AddArg(v0) + v1 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, t) + v2 := b.NewValue0(v.Line, OpAMD64CMPWconst, TypeFlags) + v2.AddArg(y) + v2.AuxInt = 32 + v1.AddArg(v2) + v.AddArg(v1) + return true + } + return false +} +func rewriteValueAMD64_OpRsh32Ux32(v *Value, config *Config) bool { + b := v.Block + _ = b + // match: (Rsh32Ux32 <t> x y) + // cond: + // result: (ANDL (SHRL <t> x y) (SBBLcarrymask <t> (CMPLconst y [32]))) + for { + t := v.Type + x := v.Args[0] + y := v.Args[1] + v.reset(OpAMD64ANDL) + v0 := b.NewValue0(v.Line, OpAMD64SHRL, t) + v0.AddArg(x) + v0.AddArg(y) + v.AddArg(v0) + v1 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, t) + v2 := b.NewValue0(v.Line, OpAMD64CMPLconst, TypeFlags) + v2.AddArg(y) + v2.AuxInt = 32 + v1.AddArg(v2) + v.AddArg(v1) + return true + } + return false +} +func rewriteValueAMD64_OpRsh32Ux64(v *Value, config *Config) bool { + b := v.Block + _ = b + // match: (Rsh32Ux64 <t> x y) + // cond: + // result: (ANDL (SHRL <t> x y) (SBBLcarrymask <t> (CMPQconst y [32]))) + for { + t := v.Type + x := v.Args[0] + y := v.Args[1] + v.reset(OpAMD64ANDL) + v0 := b.NewValue0(v.Line, OpAMD64SHRL, t) + v0.AddArg(x) + v0.AddArg(y) + v.AddArg(v0) + v1 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, t) + v2 := b.NewValue0(v.Line, OpAMD64CMPQconst, TypeFlags) + v2.AddArg(y) + v2.AuxInt = 32 + v1.AddArg(v2) + v.AddArg(v1) + return true + } + return false +} +func rewriteValueAMD64_OpRsh32Ux8(v *Value, config *Config) bool { + b := v.Block + _ = b + // match: (Rsh32Ux8 <t> x y) + // cond: + // result: (ANDL (SHRL <t> x y) (SBBLcarrymask <t> (CMPBconst y [32]))) + for { + t := v.Type + x := v.Args[0] + y := v.Args[1] + v.reset(OpAMD64ANDL) + v0 := b.NewValue0(v.Line, OpAMD64SHRL, t) + v0.AddArg(x) + v0.AddArg(y) + v.AddArg(v0) + v1 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, t) + v2 := b.NewValue0(v.Line, OpAMD64CMPBconst, TypeFlags) + v2.AddArg(y) + v2.AuxInt = 32 + v1.AddArg(v2) + v.AddArg(v1) + return true + } + return false +} +func rewriteValueAMD64_OpRsh32x16(v *Value, config *Config) bool { + b := v.Block + _ = b + // match: (Rsh32x16 <t> x y) + // cond: + // result: (SARL <t> x (ORW <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPWconst y [32]))))) + for { + t := v.Type + x := v.Args[0] + y := v.Args[1] + v.reset(OpAMD64SARL) + v.Type = t + v.AddArg(x) + v0 := b.NewValue0(v.Line, OpAMD64ORW, y.Type) + v0.AddArg(y) + v1 := b.NewValue0(v.Line, OpAMD64NOTL, y.Type) + v2 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, y.Type) + v3 := b.NewValue0(v.Line, OpAMD64CMPWconst, TypeFlags) + v3.AddArg(y) + v3.AuxInt = 32 + v2.AddArg(v3) + v1.AddArg(v2) + v0.AddArg(v1) + v.AddArg(v0) + return true + } + return false +} +func rewriteValueAMD64_OpRsh32x32(v *Value, config *Config) bool { + b := v.Block + _ = b + // match: (Rsh32x32 <t> x y) + // cond: + // result: (SARL <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPLconst y [32]))))) + for { + t := v.Type + x := v.Args[0] + y := v.Args[1] + v.reset(OpAMD64SARL) + v.Type = t + v.AddArg(x) + v0 := b.NewValue0(v.Line, OpAMD64ORL, y.Type) + v0.AddArg(y) + v1 := b.NewValue0(v.Line, OpAMD64NOTL, y.Type) + v2 := 
b.NewValue0(v.Line, OpAMD64SBBLcarrymask, y.Type) + v3 := b.NewValue0(v.Line, OpAMD64CMPLconst, TypeFlags) + v3.AddArg(y) + v3.AuxInt = 32 + v2.AddArg(v3) + v1.AddArg(v2) + v0.AddArg(v1) + v.AddArg(v0) + return true + } + return false +} +func rewriteValueAMD64_OpRsh32x64(v *Value, config *Config) bool { + b := v.Block + _ = b + // match: (Rsh32x64 <t> x y) + // cond: + // result: (SARL <t> x (ORQ <y.Type> y (NOTQ <y.Type> (SBBQcarrymask <y.Type> (CMPQconst y [32]))))) + for { + t := v.Type + x := v.Args[0] + y := v.Args[1] + v.reset(OpAMD64SARL) + v.Type = t + v.AddArg(x) + v0 := b.NewValue0(v.Line, OpAMD64ORQ, y.Type) + v0.AddArg(y) + v1 := b.NewValue0(v.Line, OpAMD64NOTQ, y.Type) + v2 := b.NewValue0(v.Line, OpAMD64SBBQcarrymask, y.Type) + v3 := b.NewValue0(v.Line, OpAMD64CMPQconst, TypeFlags) + v3.AddArg(y) + v3.AuxInt = 32 + v2.AddArg(v3) + v1.AddArg(v2) + v0.AddArg(v1) + v.AddArg(v0) + return true + } + return false +} +func rewriteValueAMD64_OpRsh32x8(v *Value, config *Config) bool { + b := v.Block + _ = b + // match: (Rsh32x8 <t> x y) + // cond: + // result: (SARL <t> x (ORB <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPBconst y [32]))))) + for { + t := v.Type + x := v.Args[0] + y := v.Args[1] + v.reset(OpAMD64SARL) + v.Type = t + v.AddArg(x) + v0 := b.NewValue0(v.Line, OpAMD64ORB, y.Type) + v0.AddArg(y) + v1 := b.NewValue0(v.Line, OpAMD64NOTL, y.Type) + v2 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, y.Type) + v3 := b.NewValue0(v.Line, OpAMD64CMPBconst, TypeFlags) + v3.AddArg(y) + v3.AuxInt = 32 + v2.AddArg(v3) + v1.AddArg(v2) + v0.AddArg(v1) + v.AddArg(v0) + return true + } + return false +} +func rewriteValueAMD64_OpRsh64Ux16(v *Value, config *Config) bool { + b := v.Block + _ = b + // match: (Rsh64Ux16 <t> x y) + // cond: + // result: (ANDQ (SHRQ <t> x y) (SBBQcarrymask <t> (CMPWconst y [64]))) + for { + t := v.Type + x := v.Args[0] + y := v.Args[1] + v.reset(OpAMD64ANDQ) + v0 := b.NewValue0(v.Line, OpAMD64SHRQ, t) + v0.AddArg(x) + v0.AddArg(y) + v.AddArg(v0) + v1 := b.NewValue0(v.Line, OpAMD64SBBQcarrymask, t) + v2 := b.NewValue0(v.Line, OpAMD64CMPWconst, TypeFlags) + v2.AddArg(y) + v2.AuxInt = 64 + v1.AddArg(v2) + v.AddArg(v1) + return true + } + return false +} +func rewriteValueAMD64_OpRsh64Ux32(v *Value, config *Config) bool { + b := v.Block + _ = b + // match: (Rsh64Ux32 <t> x y) + // cond: + // result: (ANDQ (SHRQ <t> x y) (SBBQcarrymask <t> (CMPLconst y [64]))) + for { + t := v.Type + x := v.Args[0] + y := v.Args[1] + v.reset(OpAMD64ANDQ) + v0 := b.NewValue0(v.Line, OpAMD64SHRQ, t) + v0.AddArg(x) + v0.AddArg(y) + v.AddArg(v0) + v1 := b.NewValue0(v.Line, OpAMD64SBBQcarrymask, t) + v2 := b.NewValue0(v.Line, OpAMD64CMPLconst, TypeFlags) + v2.AddArg(y) + v2.AuxInt = 64 + v1.AddArg(v2) + v.AddArg(v1) + return true + } + return false +} +func rewriteValueAMD64_OpRsh64Ux64(v *Value, config *Config) bool { + b := v.Block + _ = b + // match: (Rsh64Ux64 <t> x y) + // cond: + // result: (ANDQ (SHRQ <t> x y) (SBBQcarrymask <t> (CMPQconst y [64]))) + for { + t := v.Type + x := v.Args[0] + y := v.Args[1] + v.reset(OpAMD64ANDQ) + v0 := b.NewValue0(v.Line, OpAMD64SHRQ, t) + v0.AddArg(x) + v0.AddArg(y) + v.AddArg(v0) + v1 := b.NewValue0(v.Line, OpAMD64SBBQcarrymask, t) + v2 := b.NewValue0(v.Line, OpAMD64CMPQconst, TypeFlags) + v2.AddArg(y) + v2.AuxInt = 64 + v1.AddArg(v2) + v.AddArg(v1) + return true + } + return false +} +func rewriteValueAMD64_OpRsh64Ux8(v *Value, config *Config) bool { + b := v.Block + _ = b + // match: (Rsh64Ux8 <t> x y) + // cond: + // result: (ANDQ (SHRQ 
<t> x y) (SBBQcarrymask <t> (CMPBconst y [64]))) + for { + t := v.Type + x := v.Args[0] + y := v.Args[1] + v.reset(OpAMD64ANDQ) + v0 := b.NewValue0(v.Line, OpAMD64SHRQ, t) + v0.AddArg(x) + v0.AddArg(y) + v.AddArg(v0) + v1 := b.NewValue0(v.Line, OpAMD64SBBQcarrymask, t) + v2 := b.NewValue0(v.Line, OpAMD64CMPBconst, TypeFlags) + v2.AddArg(y) + v2.AuxInt = 64 + v1.AddArg(v2) + v.AddArg(v1) + return true + } + return false +} +func rewriteValueAMD64_OpRsh64x16(v *Value, config *Config) bool { + b := v.Block + _ = b + // match: (Rsh64x16 <t> x y) + // cond: + // result: (SARQ <t> x (ORW <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPWconst y [64]))))) + for { + t := v.Type + x := v.Args[0] + y := v.Args[1] + v.reset(OpAMD64SARQ) + v.Type = t + v.AddArg(x) + v0 := b.NewValue0(v.Line, OpAMD64ORW, y.Type) + v0.AddArg(y) + v1 := b.NewValue0(v.Line, OpAMD64NOTL, y.Type) + v2 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, y.Type) + v3 := b.NewValue0(v.Line, OpAMD64CMPWconst, TypeFlags) + v3.AddArg(y) + v3.AuxInt = 64 + v2.AddArg(v3) + v1.AddArg(v2) + v0.AddArg(v1) + v.AddArg(v0) + return true + } + return false +} +func rewriteValueAMD64_OpRsh64x32(v *Value, config *Config) bool { + b := v.Block + _ = b + // match: (Rsh64x32 <t> x y) + // cond: + // result: (SARQ <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPLconst y [64]))))) + for { + t := v.Type + x := v.Args[0] + y := v.Args[1] + v.reset(OpAMD64SARQ) + v.Type = t + v.AddArg(x) + v0 := b.NewValue0(v.Line, OpAMD64ORL, y.Type) + v0.AddArg(y) + v1 := b.NewValue0(v.Line, OpAMD64NOTL, y.Type) + v2 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, y.Type) + v3 := b.NewValue0(v.Line, OpAMD64CMPLconst, TypeFlags) + v3.AddArg(y) + v3.AuxInt = 64 + v2.AddArg(v3) + v1.AddArg(v2) + v0.AddArg(v1) + v.AddArg(v0) + return true + } + return false +} +func rewriteValueAMD64_OpRsh64x64(v *Value, config *Config) bool { + b := v.Block + _ = b + // match: (Rsh64x64 <t> x y) + // cond: + // result: (SARQ <t> x (ORQ <y.Type> y (NOTQ <y.Type> (SBBQcarrymask <y.Type> (CMPQconst y [64]))))) + for { + t := v.Type + x := v.Args[0] + y := v.Args[1] + v.reset(OpAMD64SARQ) + v.Type = t + v.AddArg(x) + v0 := b.NewValue0(v.Line, OpAMD64ORQ, y.Type) + v0.AddArg(y) + v1 := b.NewValue0(v.Line, OpAMD64NOTQ, y.Type) + v2 := b.NewValue0(v.Line, OpAMD64SBBQcarrymask, y.Type) + v3 := b.NewValue0(v.Line, OpAMD64CMPQconst, TypeFlags) + v3.AddArg(y) + v3.AuxInt = 64 + v2.AddArg(v3) + v1.AddArg(v2) + v0.AddArg(v1) + v.AddArg(v0) + return true + } + return false +} +func rewriteValueAMD64_OpRsh64x8(v *Value, config *Config) bool { + b := v.Block + _ = b + // match: (Rsh64x8 <t> x y) + // cond: + // result: (SARQ <t> x (ORB <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPBconst y [64]))))) + for { + t := v.Type + x := v.Args[0] + y := v.Args[1] + v.reset(OpAMD64SARQ) + v.Type = t + v.AddArg(x) + v0 := b.NewValue0(v.Line, OpAMD64ORB, y.Type) + v0.AddArg(y) + v1 := b.NewValue0(v.Line, OpAMD64NOTL, y.Type) + v2 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, y.Type) + v3 := b.NewValue0(v.Line, OpAMD64CMPBconst, TypeFlags) + v3.AddArg(y) + v3.AuxInt = 64 + v2.AddArg(v3) + v1.AddArg(v2) + v0.AddArg(v1) + v.AddArg(v0) + return true + } + return false +} +func rewriteValueAMD64_OpRsh8Ux16(v *Value, config *Config) bool { + b := v.Block + _ = b + // match: (Rsh8Ux16 <t> x y) + // cond: + // result: (ANDB (SHRB <t> x y) (SBBLcarrymask <t> (CMPWconst y [8]))) + for { + t := v.Type + x := v.Args[0] + y := v.Args[1] + v.reset(OpAMD64ANDB) + v0 := b.NewValue0(v.Line, OpAMD64SHRB, t) + 
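// Unsigned shifts use the complementary trick: SBBLcarrymask(CMPxconst y + // [8]) is -1 for y < 8 and 0 otherwise, so ANDing it with the SHRB result + // yields 0 for out-of-range counts, as Go's shift semantics require. +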
v0.AddArg(x) + v0.AddArg(y) + v.AddArg(v0) + v1 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, t) + v2 := b.NewValue0(v.Line, OpAMD64CMPWconst, TypeFlags) + v2.AddArg(y) + v2.AuxInt = 8 + v1.AddArg(v2) + v.AddArg(v1) + return true + } + return false +} +func rewriteValueAMD64_OpRsh8Ux32(v *Value, config *Config) bool { + b := v.Block + _ = b + // match: (Rsh8Ux32 <t> x y) + // cond: + // result: (ANDB (SHRB <t> x y) (SBBLcarrymask <t> (CMPLconst y [8]))) + for { + t := v.Type + x := v.Args[0] + y := v.Args[1] + v.reset(OpAMD64ANDB) + v0 := b.NewValue0(v.Line, OpAMD64SHRB, t) + v0.AddArg(x) + v0.AddArg(y) + v.AddArg(v0) + v1 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, t) + v2 := b.NewValue0(v.Line, OpAMD64CMPLconst, TypeFlags) + v2.AddArg(y) + v2.AuxInt = 8 + v1.AddArg(v2) + v.AddArg(v1) + return true + } + return false +} +func rewriteValueAMD64_OpRsh8Ux64(v *Value, config *Config) bool { + b := v.Block + _ = b + // match: (Rsh8Ux64 <t> x y) + // cond: + // result: (ANDB (SHRB <t> x y) (SBBLcarrymask <t> (CMPQconst y [8]))) + for { + t := v.Type + x := v.Args[0] + y := v.Args[1] + v.reset(OpAMD64ANDB) + v0 := b.NewValue0(v.Line, OpAMD64SHRB, t) + v0.AddArg(x) + v0.AddArg(y) + v.AddArg(v0) + v1 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, t) + v2 := b.NewValue0(v.Line, OpAMD64CMPQconst, TypeFlags) + v2.AddArg(y) + v2.AuxInt = 8 + v1.AddArg(v2) + v.AddArg(v1) + return true + } + return false +} +func rewriteValueAMD64_OpRsh8Ux8(v *Value, config *Config) bool { + b := v.Block + _ = b + // match: (Rsh8Ux8 <t> x y) + // cond: + // result: (ANDB (SHRB <t> x y) (SBBLcarrymask <t> (CMPBconst y [8]))) + for { + t := v.Type + x := v.Args[0] + y := v.Args[1] + v.reset(OpAMD64ANDB) + v0 := b.NewValue0(v.Line, OpAMD64SHRB, t) + v0.AddArg(x) + v0.AddArg(y) + v.AddArg(v0) + v1 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, t) + v2 := b.NewValue0(v.Line, OpAMD64CMPBconst, TypeFlags) + v2.AddArg(y) + v2.AuxInt = 8 + v1.AddArg(v2) + v.AddArg(v1) + return true + } + return false +} +func rewriteValueAMD64_OpRsh8x16(v *Value, config *Config) bool { + b := v.Block + _ = b + // match: (Rsh8x16 <t> x y) + // cond: + // result: (SARB <t> x (ORW <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPWconst y [8]))))) + for { + t := v.Type + x := v.Args[0] + y := v.Args[1] + v.reset(OpAMD64SARB) + v.Type = t + v.AddArg(x) + v0 := b.NewValue0(v.Line, OpAMD64ORW, y.Type) + v0.AddArg(y) + v1 := b.NewValue0(v.Line, OpAMD64NOTL, y.Type) + v2 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, y.Type) + v3 := b.NewValue0(v.Line, OpAMD64CMPWconst, TypeFlags) + v3.AddArg(y) + v3.AuxInt = 8 + v2.AddArg(v3) + v1.AddArg(v2) + v0.AddArg(v1) + v.AddArg(v0) + return true + } + return false +} +func rewriteValueAMD64_OpRsh8x32(v *Value, config *Config) bool { + b := v.Block + _ = b + // match: (Rsh8x32 <t> x y) + // cond: + // result: (SARB <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPLconst y [8]))))) + for { + t := v.Type + x := v.Args[0] + y := v.Args[1] + v.reset(OpAMD64SARB) + v.Type = t + v.AddArg(x) + v0 := b.NewValue0(v.Line, OpAMD64ORL, y.Type) + v0.AddArg(y) + v1 := b.NewValue0(v.Line, OpAMD64NOTL, y.Type) + v2 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, y.Type) + v3 := b.NewValue0(v.Line, OpAMD64CMPLconst, TypeFlags) + v3.AddArg(y) + v3.AuxInt = 8 + v2.AddArg(v3) + v1.AddArg(v2) + v0.AddArg(v1) + v.AddArg(v0) + return true + } + return false +} +func rewriteValueAMD64_OpRsh8x64(v *Value, config *Config) bool { + b := v.Block + _ = b + // match: (Rsh8x64 <t> x y) + // cond: + // result: (SARB <t> x (ORQ 
<y.Type> y (NOTQ <y.Type> (SBBQcarrymask <y.Type> (CMPQconst y [8]))))) + for { + t := v.Type + x := v.Args[0] + y := v.Args[1] + v.reset(OpAMD64SARB) + v.Type = t + v.AddArg(x) + v0 := b.NewValue0(v.Line, OpAMD64ORQ, y.Type) + v0.AddArg(y) + v1 := b.NewValue0(v.Line, OpAMD64NOTQ, y.Type) + v2 := b.NewValue0(v.Line, OpAMD64SBBQcarrymask, y.Type) + v3 := b.NewValue0(v.Line, OpAMD64CMPQconst, TypeFlags) + v3.AddArg(y) + v3.AuxInt = 8 + v2.AddArg(v3) + v1.AddArg(v2) + v0.AddArg(v1) + v.AddArg(v0) + return true + } + return false +} +func rewriteValueAMD64_OpRsh8x8(v *Value, config *Config) bool { + b := v.Block + _ = b + // match: (Rsh8x8 <t> x y) + // cond: + // result: (SARB <t> x (ORB <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPBconst y [8]))))) + for { + t := v.Type + x := v.Args[0] + y := v.Args[1] + v.reset(OpAMD64SARB) + v.Type = t + v.AddArg(x) + v0 := b.NewValue0(v.Line, OpAMD64ORB, y.Type) + v0.AddArg(y) + v1 := b.NewValue0(v.Line, OpAMD64NOTL, y.Type) + v2 := b.NewValue0(v.Line, OpAMD64SBBLcarrymask, y.Type) + v3 := b.NewValue0(v.Line, OpAMD64CMPBconst, TypeFlags) + v3.AddArg(y) + v3.AuxInt = 8 + v2.AddArg(v3) + v1.AddArg(v2) + v0.AddArg(v1) + v.AddArg(v0) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64SARB(v *Value, config *Config) bool { + b := v.Block + _ = b + // match: (SARB x (MOVQconst [c])) + // cond: + // result: (SARBconst [c&31] x) + for { + x := v.Args[0] + if v.Args[1].Op != OpAMD64MOVQconst { + break + } + c := v.Args[1].AuxInt + v.reset(OpAMD64SARBconst) + v.AuxInt = c & 31 + v.AddArg(x) + return true + } + // match: (SARB x (MOVLconst [c])) + // cond: + // result: (SARBconst [c&31] x) + for { + x := v.Args[0] + if v.Args[1].Op != OpAMD64MOVLconst { + break + } + c := v.Args[1].AuxInt + v.reset(OpAMD64SARBconst) + v.AuxInt = c & 31 + v.AddArg(x) + return true + } + // match: (SARB x (MOVWconst [c])) + // cond: + // result: (SARBconst [c&31] x) + for { + x := v.Args[0] + if v.Args[1].Op != OpAMD64MOVWconst { + break + } + c := v.Args[1].AuxInt + v.reset(OpAMD64SARBconst) + v.AuxInt = c & 31 + v.AddArg(x) + return true + } + // match: (SARB x (MOVBconst [c])) + // cond: + // result: (SARBconst [c&31] x) + for { + x := v.Args[0] + if v.Args[1].Op != OpAMD64MOVBconst { + break + } + c := v.Args[1].AuxInt + v.reset(OpAMD64SARBconst) + v.AuxInt = c & 31 + v.AddArg(x) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64SARBconst(v *Value, config *Config) bool { + b := v.Block + _ = b + // match: (SARBconst [c] (MOVQconst [d])) + // cond: + // result: (MOVQconst [d>>uint64(c)]) + for { + c := v.AuxInt + if v.Args[0].Op != OpAMD64MOVQconst { + break + } + d := v.Args[0].AuxInt + v.reset(OpAMD64MOVQconst) + v.AuxInt = d >> uint64(c) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64SARL(v *Value, config *Config) bool { + b := v.Block + _ = b + // match: (SARL x (MOVQconst [c])) + // cond: + // result: (SARLconst [c&31] x) + for { + x := v.Args[0] + if v.Args[1].Op != OpAMD64MOVQconst { + break + } + c := v.Args[1].AuxInt + v.reset(OpAMD64SARLconst) + v.AuxInt = c & 31 + v.AddArg(x) + return true + } + // match: (SARL x (MOVLconst [c])) + // cond: + // result: (SARLconst [c&31] x) + for { + x := v.Args[0] + if v.Args[1].Op != OpAMD64MOVLconst { + break + } + c := v.Args[1].AuxInt + v.reset(OpAMD64SARLconst) + v.AuxInt = c & 31 + v.AddArg(x) + return true + } + // match: (SARL x (MOVWconst [c])) + // cond: + // result: (SARLconst [c&31] x) + for { + x := v.Args[0] + if v.Args[1].Op != OpAMD64MOVWconst { + break + } + 
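// Constant counts are masked with &31 here (&63 in the Q variants), + // matching the 5- and 6-bit count masking the processor itself applies. +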
c := v.Args[1].AuxInt + v.reset(OpAMD64SARLconst) + v.AuxInt = c & 31 + v.AddArg(x) + return true + } + // match: (SARL x (MOVBconst [c])) + // cond: + // result: (SARLconst [c&31] x) + for { + x := v.Args[0] + if v.Args[1].Op != OpAMD64MOVBconst { + break + } + c := v.Args[1].AuxInt + v.reset(OpAMD64SARLconst) + v.AuxInt = c & 31 + v.AddArg(x) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64SARLconst(v *Value, config *Config) bool { + b := v.Block + _ = b + // match: (SARLconst [c] (MOVQconst [d])) + // cond: + // result: (MOVQconst [d>>uint64(c)]) + for { + c := v.AuxInt + if v.Args[0].Op != OpAMD64MOVQconst { + break + } + d := v.Args[0].AuxInt + v.reset(OpAMD64MOVQconst) + v.AuxInt = d >> uint64(c) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64SARQ(v *Value, config *Config) bool { + b := v.Block + _ = b + // match: (SARQ x (MOVQconst [c])) + // cond: + // result: (SARQconst [c&63] x) + for { + x := v.Args[0] + if v.Args[1].Op != OpAMD64MOVQconst { + break + } + c := v.Args[1].AuxInt + v.reset(OpAMD64SARQconst) + v.AuxInt = c & 63 + v.AddArg(x) + return true + } + // match: (SARQ x (MOVLconst [c])) + // cond: + // result: (SARQconst [c&63] x) + for { + x := v.Args[0] + if v.Args[1].Op != OpAMD64MOVLconst { + break + } + c := v.Args[1].AuxInt + v.reset(OpAMD64SARQconst) + v.AuxInt = c & 63 + v.AddArg(x) + return true + } + // match: (SARQ x (MOVWconst [c])) + // cond: + // result: (SARQconst [c&63] x) + for { + x := v.Args[0] + if v.Args[1].Op != OpAMD64MOVWconst { + break + } + c := v.Args[1].AuxInt + v.reset(OpAMD64SARQconst) + v.AuxInt = c & 63 + v.AddArg(x) + return true + } + // match: (SARQ x (MOVBconst [c])) + // cond: + // result: (SARQconst [c&63] x) + for { + x := v.Args[0] + if v.Args[1].Op != OpAMD64MOVBconst { + break + } + c := v.Args[1].AuxInt + v.reset(OpAMD64SARQconst) + v.AuxInt = c & 63 + v.AddArg(x) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64SARQconst(v *Value, config *Config) bool { + b := v.Block + _ = b + // match: (SARQconst [c] (MOVQconst [d])) + // cond: + // result: (MOVQconst [d>>uint64(c)]) + for { + c := v.AuxInt + if v.Args[0].Op != OpAMD64MOVQconst { + break + } + d := v.Args[0].AuxInt + v.reset(OpAMD64MOVQconst) + v.AuxInt = d >> uint64(c) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64SARW(v *Value, config *Config) bool { + b := v.Block + _ = b + // match: (SARW x (MOVQconst [c])) + // cond: + // result: (SARWconst [c&31] x) + for { + x := v.Args[0] + if v.Args[1].Op != OpAMD64MOVQconst { + break + } + c := v.Args[1].AuxInt + v.reset(OpAMD64SARWconst) + v.AuxInt = c & 31 + v.AddArg(x) + return true + } + // match: (SARW x (MOVLconst [c])) + // cond: + // result: (SARWconst [c&31] x) + for { + x := v.Args[0] + if v.Args[1].Op != OpAMD64MOVLconst { + break + } + c := v.Args[1].AuxInt + v.reset(OpAMD64SARWconst) + v.AuxInt = c & 31 + v.AddArg(x) + return true + } + // match: (SARW x (MOVWconst [c])) + // cond: + // result: (SARWconst [c&31] x) + for { + x := v.Args[0] + if v.Args[1].Op != OpAMD64MOVWconst { + break + } + c := v.Args[1].AuxInt + v.reset(OpAMD64SARWconst) + v.AuxInt = c & 31 + v.AddArg(x) + return true + } + // match: (SARW x (MOVBconst [c])) + // cond: + // result: (SARWconst [c&31] x) + for { + x := v.Args[0] + if v.Args[1].Op != OpAMD64MOVBconst { + break + } + c := v.Args[1].AuxInt + v.reset(OpAMD64SARWconst) + v.AuxInt = c & 31 + v.AddArg(x) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64SARWconst(v *Value, config *Config) bool { + b 
:= v.Block + _ = b + // match: (SARWconst [c] (MOVQconst [d])) + // cond: + // result: (MOVQconst [d>>uint64(c)]) + for { + c := v.AuxInt + if v.Args[0].Op != OpAMD64MOVQconst { + break + } + d := v.Args[0].AuxInt + v.reset(OpAMD64MOVQconst) + v.AuxInt = d >> uint64(c) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64SBBLcarrymask(v *Value, config *Config) bool { + b := v.Block + _ = b + // match: (SBBLcarrymask (FlagEQ)) + // cond: + // result: (MOVLconst [0]) + for { + if v.Args[0].Op != OpAMD64FlagEQ { + break + } + v.reset(OpAMD64MOVLconst) + v.AuxInt = 0 + return true + } + // match: (SBBLcarrymask (FlagLT_ULT)) + // cond: + // result: (MOVLconst [-1]) + for { + if v.Args[0].Op != OpAMD64FlagLT_ULT { + break + } + v.reset(OpAMD64MOVLconst) + v.AuxInt = -1 + return true + } + // match: (SBBLcarrymask (FlagLT_UGT)) + // cond: + // result: (MOVLconst [0]) + for { + if v.Args[0].Op != OpAMD64FlagLT_UGT { + break + } + v.reset(OpAMD64MOVLconst) + v.AuxInt = 0 + return true + } + // match: (SBBLcarrymask (FlagGT_ULT)) + // cond: + // result: (MOVLconst [-1]) + for { + if v.Args[0].Op != OpAMD64FlagGT_ULT { + break + } + v.reset(OpAMD64MOVLconst) + v.AuxInt = -1 + return true + } + // match: (SBBLcarrymask (FlagGT_UGT)) + // cond: + // result: (MOVLconst [0]) + for { + if v.Args[0].Op != OpAMD64FlagGT_UGT { + break + } + v.reset(OpAMD64MOVLconst) + v.AuxInt = 0 + return true + } + return false +} +func rewriteValueAMD64_OpAMD64SBBQcarrymask(v *Value, config *Config) bool { + b := v.Block + _ = b + // match: (SBBQcarrymask (FlagEQ)) + // cond: + // result: (MOVQconst [0]) + for { + if v.Args[0].Op != OpAMD64FlagEQ { + break + } + v.reset(OpAMD64MOVQconst) + v.AuxInt = 0 + return true + } + // match: (SBBQcarrymask (FlagLT_ULT)) + // cond: + // result: (MOVQconst [-1]) + for { + if v.Args[0].Op != OpAMD64FlagLT_ULT { + break + } + v.reset(OpAMD64MOVQconst) + v.AuxInt = -1 + return true + } + // match: (SBBQcarrymask (FlagLT_UGT)) + // cond: + // result: (MOVQconst [0]) + for { + if v.Args[0].Op != OpAMD64FlagLT_UGT { + break + } + v.reset(OpAMD64MOVQconst) + v.AuxInt = 0 + return true + } + // match: (SBBQcarrymask (FlagGT_ULT)) + // cond: + // result: (MOVQconst [-1]) + for { + if v.Args[0].Op != OpAMD64FlagGT_ULT { + break + } + v.reset(OpAMD64MOVQconst) + v.AuxInt = -1 + return true + } + // match: (SBBQcarrymask (FlagGT_UGT)) + // cond: + // result: (MOVQconst [0]) + for { + if v.Args[0].Op != OpAMD64FlagGT_UGT { + break + } + v.reset(OpAMD64MOVQconst) + v.AuxInt = 0 + return true + } + return false +} +func rewriteValueAMD64_OpAMD64SETA(v *Value, config *Config) bool { + b := v.Block + _ = b + // match: (SETA (InvertFlags x)) + // cond: + // result: (SETB x) + for { + if v.Args[0].Op != OpAMD64InvertFlags { + break + } + x := v.Args[0].Args[0] + v.reset(OpAMD64SETB) + v.AddArg(x) + return true + } + // match: (SETA (FlagEQ)) + // cond: + // result: (MOVBconst [0]) + for { + if v.Args[0].Op != OpAMD64FlagEQ { + break + } + v.reset(OpAMD64MOVBconst) + v.AuxInt = 0 + return true + } + // match: (SETA (FlagLT_ULT)) + // cond: + // result: (MOVBconst [0]) + for { + if v.Args[0].Op != OpAMD64FlagLT_ULT { + break + } + v.reset(OpAMD64MOVBconst) + v.AuxInt = 0 + return true + } + // match: (SETA (FlagLT_UGT)) + // cond: + // result: (MOVBconst [1]) + for { + if v.Args[0].Op != OpAMD64FlagLT_UGT { + break + } + v.reset(OpAMD64MOVBconst) + v.AuxInt = 1 + return true + } + // match: (SETA (FlagGT_ULT)) + // cond: + // result: (MOVBconst [0]) + for { + if v.Args[0].Op != 
OpAMD64FlagGT_ULT { + break + } + v.reset(OpAMD64MOVBconst) + v.AuxInt = 0 + return true + } + // match: (SETA (FlagGT_UGT)) + // cond: + // result: (MOVBconst [1]) + for { + if v.Args[0].Op != OpAMD64FlagGT_UGT { + break + } + v.reset(OpAMD64MOVBconst) + v.AuxInt = 1 + return true + } + return false +} +func rewriteValueAMD64_OpAMD64SETAE(v *Value, config *Config) bool { + b := v.Block + _ = b + // match: (SETAE (InvertFlags x)) + // cond: + // result: (SETBE x) + for { + if v.Args[0].Op != OpAMD64InvertFlags { + break + } + x := v.Args[0].Args[0] + v.reset(OpAMD64SETBE) + v.AddArg(x) + return true + } + // match: (SETAE (FlagEQ)) + // cond: + // result: (MOVBconst [1]) + for { + if v.Args[0].Op != OpAMD64FlagEQ { + break + } + v.reset(OpAMD64MOVBconst) + v.AuxInt = 1 + return true + } + // match: (SETAE (FlagLT_ULT)) + // cond: + // result: (MOVBconst [0]) + for { + if v.Args[0].Op != OpAMD64FlagLT_ULT { + break + } + v.reset(OpAMD64MOVBconst) + v.AuxInt = 0 + return true + } + // match: (SETAE (FlagLT_UGT)) + // cond: + // result: (MOVBconst [1]) + for { + if v.Args[0].Op != OpAMD64FlagLT_UGT { + break + } + v.reset(OpAMD64MOVBconst) + v.AuxInt = 1 + return true + } + // match: (SETAE (FlagGT_ULT)) + // cond: + // result: (MOVBconst [0]) + for { + if v.Args[0].Op != OpAMD64FlagGT_ULT { + break + } + v.reset(OpAMD64MOVBconst) + v.AuxInt = 0 + return true + } + // match: (SETAE (FlagGT_UGT)) + // cond: + // result: (MOVBconst [1]) + for { + if v.Args[0].Op != OpAMD64FlagGT_UGT { + break + } + v.reset(OpAMD64MOVBconst) + v.AuxInt = 1 + return true + } + return false +} +func rewriteValueAMD64_OpAMD64SETB(v *Value, config *Config) bool { + b := v.Block + _ = b + // match: (SETB (InvertFlags x)) + // cond: + // result: (SETA x) + for { + if v.Args[0].Op != OpAMD64InvertFlags { + break + } + x := v.Args[0].Args[0] + v.reset(OpAMD64SETA) + v.AddArg(x) + return true + } + // match: (SETB (FlagEQ)) + // cond: + // result: (MOVBconst [0]) + for { + if v.Args[0].Op != OpAMD64FlagEQ { + break + } + v.reset(OpAMD64MOVBconst) + v.AuxInt = 0 + return true + } + // match: (SETB (FlagLT_ULT)) + // cond: + // result: (MOVBconst [1]) + for { + if v.Args[0].Op != OpAMD64FlagLT_ULT { + break + } + v.reset(OpAMD64MOVBconst) + v.AuxInt = 1 + return true + } + // match: (SETB (FlagLT_UGT)) + // cond: + // result: (MOVBconst [0]) + for { + if v.Args[0].Op != OpAMD64FlagLT_UGT { + break + } + v.reset(OpAMD64MOVBconst) + v.AuxInt = 0 + return true + } + // match: (SETB (FlagGT_ULT)) + // cond: + // result: (MOVBconst [1]) + for { + if v.Args[0].Op != OpAMD64FlagGT_ULT { + break + } + v.reset(OpAMD64MOVBconst) + v.AuxInt = 1 + return true + } + // match: (SETB (FlagGT_UGT)) + // cond: + // result: (MOVBconst [0]) + for { + if v.Args[0].Op != OpAMD64FlagGT_UGT { + break + } + v.reset(OpAMD64MOVBconst) + v.AuxInt = 0 + return true + } + return false +} +func rewriteValueAMD64_OpAMD64SETBE(v *Value, config *Config) bool { + b := v.Block + _ = b + // match: (SETBE (InvertFlags x)) + // cond: + // result: (SETAE x) + for { + if v.Args[0].Op != OpAMD64InvertFlags { + break + } + x := v.Args[0].Args[0] + v.reset(OpAMD64SETAE) + v.AddArg(x) + return true + } + // match: (SETBE (FlagEQ)) + // cond: + // result: (MOVBconst [1]) + for { + if v.Args[0].Op != OpAMD64FlagEQ { + break + } + v.reset(OpAMD64MOVBconst) + v.AuxInt = 1 + return true + } + // match: (SETBE (FlagLT_ULT)) + // cond: + // result: (MOVBconst [1]) + for { + if v.Args[0].Op != OpAMD64FlagLT_ULT { + break + } + v.reset(OpAMD64MOVBconst) + v.AuxInt = 1 + 
return true + } + // match: (SETBE (FlagLT_UGT)) + // cond: + // result: (MOVBconst [0]) + for { + if v.Args[0].Op != OpAMD64FlagLT_UGT { + break + } + v.reset(OpAMD64MOVBconst) + v.AuxInt = 0 + return true + } + // match: (SETBE (FlagGT_ULT)) + // cond: + // result: (MOVBconst [1]) + for { + if v.Args[0].Op != OpAMD64FlagGT_ULT { + break + } + v.reset(OpAMD64MOVBconst) + v.AuxInt = 1 + return true + } + // match: (SETBE (FlagGT_UGT)) + // cond: + // result: (MOVBconst [0]) + for { + if v.Args[0].Op != OpAMD64FlagGT_UGT { + break + } + v.reset(OpAMD64MOVBconst) + v.AuxInt = 0 + return true + } + return false +} +func rewriteValueAMD64_OpAMD64SETEQ(v *Value, config *Config) bool { + b := v.Block + _ = b + // match: (SETEQ (InvertFlags x)) + // cond: + // result: (SETEQ x) + for { + if v.Args[0].Op != OpAMD64InvertFlags { + break + } + x := v.Args[0].Args[0] + v.reset(OpAMD64SETEQ) + v.AddArg(x) + return true + } + // match: (SETEQ (FlagEQ)) + // cond: + // result: (MOVBconst [1]) + for { + if v.Args[0].Op != OpAMD64FlagEQ { + break + } + v.reset(OpAMD64MOVBconst) + v.AuxInt = 1 + return true + } + // match: (SETEQ (FlagLT_ULT)) + // cond: + // result: (MOVBconst [0]) + for { + if v.Args[0].Op != OpAMD64FlagLT_ULT { + break + } + v.reset(OpAMD64MOVBconst) + v.AuxInt = 0 + return true + } + // match: (SETEQ (FlagLT_UGT)) + // cond: + // result: (MOVBconst [0]) + for { + if v.Args[0].Op != OpAMD64FlagLT_UGT { + break + } + v.reset(OpAMD64MOVBconst) + v.AuxInt = 0 + return true + } + // match: (SETEQ (FlagGT_ULT)) + // cond: + // result: (MOVBconst [0]) + for { + if v.Args[0].Op != OpAMD64FlagGT_ULT { + break + } + v.reset(OpAMD64MOVBconst) + v.AuxInt = 0 + return true + } + // match: (SETEQ (FlagGT_UGT)) + // cond: + // result: (MOVBconst [0]) + for { + if v.Args[0].Op != OpAMD64FlagGT_UGT { + break + } + v.reset(OpAMD64MOVBconst) + v.AuxInt = 0 + return true + } + return false +} +func rewriteValueAMD64_OpAMD64SETG(v *Value, config *Config) bool { + b := v.Block + _ = b + // match: (SETG (InvertFlags x)) + // cond: + // result: (SETL x) + for { + if v.Args[0].Op != OpAMD64InvertFlags { + break + } + x := v.Args[0].Args[0] + v.reset(OpAMD64SETL) + v.AddArg(x) + return true + } + // match: (SETG (FlagEQ)) + // cond: + // result: (MOVBconst [0]) + for { + if v.Args[0].Op != OpAMD64FlagEQ { + break + } + v.reset(OpAMD64MOVBconst) + v.AuxInt = 0 + return true + } + // match: (SETG (FlagLT_ULT)) + // cond: + // result: (MOVBconst [0]) + for { + if v.Args[0].Op != OpAMD64FlagLT_ULT { + break + } + v.reset(OpAMD64MOVBconst) + v.AuxInt = 0 + return true + } + // match: (SETG (FlagLT_UGT)) + // cond: + // result: (MOVBconst [0]) + for { + if v.Args[0].Op != OpAMD64FlagLT_UGT { + break + } + v.reset(OpAMD64MOVBconst) + v.AuxInt = 0 + return true + } + // match: (SETG (FlagGT_ULT)) + // cond: + // result: (MOVBconst [1]) + for { + if v.Args[0].Op != OpAMD64FlagGT_ULT { + break + } + v.reset(OpAMD64MOVBconst) + v.AuxInt = 1 + return true + } + // match: (SETG (FlagGT_UGT)) + // cond: + // result: (MOVBconst [1]) + for { + if v.Args[0].Op != OpAMD64FlagGT_UGT { + break + } + v.reset(OpAMD64MOVBconst) + v.AuxInt = 1 + return true + } + return false +} +func rewriteValueAMD64_OpAMD64SETGE(v *Value, config *Config) bool { + b := v.Block + _ = b + // match: (SETGE (InvertFlags x)) + // cond: + // result: (SETLE x) + for { + if v.Args[0].Op != OpAMD64InvertFlags { + break + } + x := v.Args[0].Args[0] + v.reset(OpAMD64SETLE) + v.AddArg(x) + return true + } + // match: (SETGE (FlagEQ)) + // cond: + // result: 
(MOVBconst [1]) + for { + if v.Args[0].Op != OpAMD64FlagEQ { + break + } + v.reset(OpAMD64MOVBconst) + v.AuxInt = 1 + return true + } + // match: (SETGE (FlagLT_ULT)) + // cond: + // result: (MOVBconst [0]) + for { + if v.Args[0].Op != OpAMD64FlagLT_ULT { + break + } + v.reset(OpAMD64MOVBconst) + v.AuxInt = 0 + return true + } + // match: (SETGE (FlagLT_UGT)) + // cond: + // result: (MOVBconst [0]) + for { + if v.Args[0].Op != OpAMD64FlagLT_UGT { + break + } + v.reset(OpAMD64MOVBconst) + v.AuxInt = 0 + return true + } + // match: (SETGE (FlagGT_ULT)) + // cond: + // result: (MOVBconst [1]) + for { + if v.Args[0].Op != OpAMD64FlagGT_ULT { + break + } + v.reset(OpAMD64MOVBconst) + v.AuxInt = 1 + return true + } + // match: (SETGE (FlagGT_UGT)) + // cond: + // result: (MOVBconst [1]) + for { + if v.Args[0].Op != OpAMD64FlagGT_UGT { + break + } + v.reset(OpAMD64MOVBconst) + v.AuxInt = 1 + return true + } + return false +} +func rewriteValueAMD64_OpAMD64SETL(v *Value, config *Config) bool { + b := v.Block + _ = b + // match: (SETL (InvertFlags x)) + // cond: + // result: (SETG x) + for { + if v.Args[0].Op != OpAMD64InvertFlags { + break + } + x := v.Args[0].Args[0] + v.reset(OpAMD64SETG) + v.AddArg(x) + return true + } + // match: (SETL (FlagEQ)) + // cond: + // result: (MOVBconst [0]) + for { + if v.Args[0].Op != OpAMD64FlagEQ { + break + } + v.reset(OpAMD64MOVBconst) + v.AuxInt = 0 + return true + } + // match: (SETL (FlagLT_ULT)) + // cond: + // result: (MOVBconst [1]) + for { + if v.Args[0].Op != OpAMD64FlagLT_ULT { + break + } + v.reset(OpAMD64MOVBconst) + v.AuxInt = 1 + return true + } + // match: (SETL (FlagLT_UGT)) + // cond: + // result: (MOVBconst [1]) + for { + if v.Args[0].Op != OpAMD64FlagLT_UGT { + break + } + v.reset(OpAMD64MOVBconst) + v.AuxInt = 1 + return true + } + // match: (SETL (FlagGT_ULT)) + // cond: + // result: (MOVBconst [0]) + for { + if v.Args[0].Op != OpAMD64FlagGT_ULT { + break + } + v.reset(OpAMD64MOVBconst) + v.AuxInt = 0 + return true + } + // match: (SETL (FlagGT_UGT)) + // cond: + // result: (MOVBconst [0]) + for { + if v.Args[0].Op != OpAMD64FlagGT_UGT { + break + } + v.reset(OpAMD64MOVBconst) + v.AuxInt = 0 + return true + } + return false +} +func rewriteValueAMD64_OpAMD64SETLE(v *Value, config *Config) bool { + b := v.Block + _ = b + // match: (SETLE (InvertFlags x)) + // cond: + // result: (SETGE x) + for { + if v.Args[0].Op != OpAMD64InvertFlags { + break + } + x := v.Args[0].Args[0] + v.reset(OpAMD64SETGE) + v.AddArg(x) + return true + } + // match: (SETLE (FlagEQ)) + // cond: + // result: (MOVBconst [1]) + for { + if v.Args[0].Op != OpAMD64FlagEQ { + break + } + v.reset(OpAMD64MOVBconst) + v.AuxInt = 1 + return true + } + // match: (SETLE (FlagLT_ULT)) + // cond: + // result: (MOVBconst [1]) + for { + if v.Args[0].Op != OpAMD64FlagLT_ULT { + break + } + v.reset(OpAMD64MOVBconst) + v.AuxInt = 1 + return true + } + // match: (SETLE (FlagLT_UGT)) + // cond: + // result: (MOVBconst [1]) + for { + if v.Args[0].Op != OpAMD64FlagLT_UGT { + break + } + v.reset(OpAMD64MOVBconst) + v.AuxInt = 1 + return true + } + // match: (SETLE (FlagGT_ULT)) + // cond: + // result: (MOVBconst [0]) + for { + if v.Args[0].Op != OpAMD64FlagGT_ULT { + break + } + v.reset(OpAMD64MOVBconst) + v.AuxInt = 0 + return true + } + // match: (SETLE (FlagGT_UGT)) + // cond: + // result: (MOVBconst [0]) + for { + if v.Args[0].Op != OpAMD64FlagGT_UGT { + break + } + v.reset(OpAMD64MOVBconst) + v.AuxInt = 0 + return true + } + return false +} +func rewriteValueAMD64_OpAMD64SETNE(v *Value, 
config *Config) bool { + b := v.Block + _ = b + // match: (SETNE (InvertFlags x)) + // cond: + // result: (SETNE x) + for { + if v.Args[0].Op != OpAMD64InvertFlags { + break + } + x := v.Args[0].Args[0] + v.reset(OpAMD64SETNE) + v.AddArg(x) + return true + } + // match: (SETNE (FlagEQ)) + // cond: + // result: (MOVBconst [0]) + for { + if v.Args[0].Op != OpAMD64FlagEQ { + break + } + v.reset(OpAMD64MOVBconst) + v.AuxInt = 0 + return true + } + // match: (SETNE (FlagLT_ULT)) + // cond: + // result: (MOVBconst [1]) + for { + if v.Args[0].Op != OpAMD64FlagLT_ULT { + break + } + v.reset(OpAMD64MOVBconst) + v.AuxInt = 1 + return true + } + // match: (SETNE (FlagLT_UGT)) + // cond: + // result: (MOVBconst [1]) + for { + if v.Args[0].Op != OpAMD64FlagLT_UGT { + break + } + v.reset(OpAMD64MOVBconst) + v.AuxInt = 1 + return true + } + // match: (SETNE (FlagGT_ULT)) + // cond: + // result: (MOVBconst [1]) + for { + if v.Args[0].Op != OpAMD64FlagGT_ULT { + break + } + v.reset(OpAMD64MOVBconst) + v.AuxInt = 1 + return true + } + // match: (SETNE (FlagGT_UGT)) + // cond: + // result: (MOVBconst [1]) + for { + if v.Args[0].Op != OpAMD64FlagGT_UGT { + break + } + v.reset(OpAMD64MOVBconst) + v.AuxInt = 1 + return true + } + return false +} +func rewriteValueAMD64_OpAMD64SHLB(v *Value, config *Config) bool { + b := v.Block + _ = b + // match: (SHLB x (MOVQconst [c])) + // cond: + // result: (SHLBconst [c&31] x) + for { + x := v.Args[0] + if v.Args[1].Op != OpAMD64MOVQconst { + break + } + c := v.Args[1].AuxInt + v.reset(OpAMD64SHLBconst) + v.AuxInt = c & 31 + v.AddArg(x) + return true + } + // match: (SHLB x (MOVLconst [c])) + // cond: + // result: (SHLBconst [c&31] x) + for { + x := v.Args[0] + if v.Args[1].Op != OpAMD64MOVLconst { + break + } + c := v.Args[1].AuxInt + v.reset(OpAMD64SHLBconst) + v.AuxInt = c & 31 + v.AddArg(x) + return true + } + // match: (SHLB x (MOVWconst [c])) + // cond: + // result: (SHLBconst [c&31] x) + for { + x := v.Args[0] + if v.Args[1].Op != OpAMD64MOVWconst { + break + } + c := v.Args[1].AuxInt + v.reset(OpAMD64SHLBconst) + v.AuxInt = c & 31 + v.AddArg(x) + return true + } + // match: (SHLB x (MOVBconst [c])) + // cond: + // result: (SHLBconst [c&31] x) + for { + x := v.Args[0] + if v.Args[1].Op != OpAMD64MOVBconst { + break + } + c := v.Args[1].AuxInt + v.reset(OpAMD64SHLBconst) + v.AuxInt = c & 31 + v.AddArg(x) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64SHLL(v *Value, config *Config) bool { + b := v.Block + _ = b + // match: (SHLL x (MOVQconst [c])) + // cond: + // result: (SHLLconst [c&31] x) + for { + x := v.Args[0] + if v.Args[1].Op != OpAMD64MOVQconst { + break + } + c := v.Args[1].AuxInt + v.reset(OpAMD64SHLLconst) + v.AuxInt = c & 31 + v.AddArg(x) + return true + } + // match: (SHLL x (MOVLconst [c])) + // cond: + // result: (SHLLconst [c&31] x) + for { + x := v.Args[0] + if v.Args[1].Op != OpAMD64MOVLconst { + break + } + c := v.Args[1].AuxInt + v.reset(OpAMD64SHLLconst) + v.AuxInt = c & 31 + v.AddArg(x) + return true + } + // match: (SHLL x (MOVWconst [c])) + // cond: + // result: (SHLLconst [c&31] x) + for { + x := v.Args[0] + if v.Args[1].Op != OpAMD64MOVWconst { + break + } + c := v.Args[1].AuxInt + v.reset(OpAMD64SHLLconst) + v.AuxInt = c & 31 + v.AddArg(x) + return true + } + // match: (SHLL x (MOVBconst [c])) + // cond: + // result: (SHLLconst [c&31] x) + for { + x := v.Args[0] + if v.Args[1].Op != OpAMD64MOVBconst { + break + } + c := v.Args[1].AuxInt + v.reset(OpAMD64SHLLconst) + v.AuxInt = c & 31 + v.AddArg(x) + return true + } + 
return false +} +func rewriteValueAMD64_OpAMD64SHLQ(v *Value, config *Config) bool { + b := v.Block + _ = b + // match: (SHLQ x (MOVQconst [c])) + // cond: + // result: (SHLQconst [c&63] x) + for { + x := v.Args[0] + if v.Args[1].Op != OpAMD64MOVQconst { + break + } + c := v.Args[1].AuxInt + v.reset(OpAMD64SHLQconst) + v.AuxInt = c & 63 + v.AddArg(x) + return true + } + // match: (SHLQ x (MOVLconst [c])) + // cond: + // result: (SHLQconst [c&63] x) + for { + x := v.Args[0] + if v.Args[1].Op != OpAMD64MOVLconst { + break + } + c := v.Args[1].AuxInt + v.reset(OpAMD64SHLQconst) + v.AuxInt = c & 63 + v.AddArg(x) + return true + } + // match: (SHLQ x (MOVWconst [c])) + // cond: + // result: (SHLQconst [c&63] x) + for { + x := v.Args[0] + if v.Args[1].Op != OpAMD64MOVWconst { + break + } + c := v.Args[1].AuxInt + v.reset(OpAMD64SHLQconst) + v.AuxInt = c & 63 + v.AddArg(x) + return true + } + // match: (SHLQ x (MOVBconst [c])) + // cond: + // result: (SHLQconst [c&63] x) + for { + x := v.Args[0] + if v.Args[1].Op != OpAMD64MOVBconst { + break + } + c := v.Args[1].AuxInt + v.reset(OpAMD64SHLQconst) + v.AuxInt = c & 63 + v.AddArg(x) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64SHLW(v *Value, config *Config) bool { + b := v.Block + _ = b + // match: (SHLW x (MOVQconst [c])) + // cond: + // result: (SHLWconst [c&31] x) + for { + x := v.Args[0] + if v.Args[1].Op != OpAMD64MOVQconst { + break + } + c := v.Args[1].AuxInt + v.reset(OpAMD64SHLWconst) + v.AuxInt = c & 31 + v.AddArg(x) + return true + } + // match: (SHLW x (MOVLconst [c])) + // cond: + // result: (SHLWconst [c&31] x) + for { + x := v.Args[0] + if v.Args[1].Op != OpAMD64MOVLconst { + break + } + c := v.Args[1].AuxInt + v.reset(OpAMD64SHLWconst) + v.AuxInt = c & 31 + v.AddArg(x) + return true + } + // match: (SHLW x (MOVWconst [c])) + // cond: + // result: (SHLWconst [c&31] x) + for { + x := v.Args[0] + if v.Args[1].Op != OpAMD64MOVWconst { + break + } + c := v.Args[1].AuxInt + v.reset(OpAMD64SHLWconst) + v.AuxInt = c & 31 + v.AddArg(x) + return true + } + // match: (SHLW x (MOVBconst [c])) + // cond: + // result: (SHLWconst [c&31] x) + for { + x := v.Args[0] + if v.Args[1].Op != OpAMD64MOVBconst { + break + } + c := v.Args[1].AuxInt + v.reset(OpAMD64SHLWconst) + v.AuxInt = c & 31 + v.AddArg(x) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64SHRB(v *Value, config *Config) bool { + b := v.Block + _ = b + // match: (SHRB x (MOVQconst [c])) + // cond: + // result: (SHRBconst [c&31] x) + for { + x := v.Args[0] + if v.Args[1].Op != OpAMD64MOVQconst { + break + } + c := v.Args[1].AuxInt + v.reset(OpAMD64SHRBconst) + v.AuxInt = c & 31 + v.AddArg(x) + return true + } + // match: (SHRB x (MOVLconst [c])) + // cond: + // result: (SHRBconst [c&31] x) + for { + x := v.Args[0] + if v.Args[1].Op != OpAMD64MOVLconst { + break + } + c := v.Args[1].AuxInt + v.reset(OpAMD64SHRBconst) + v.AuxInt = c & 31 + v.AddArg(x) + return true + } + // match: (SHRB x (MOVWconst [c])) + // cond: + // result: (SHRBconst [c&31] x) + for { + x := v.Args[0] + if v.Args[1].Op != OpAMD64MOVWconst { + break + } + c := v.Args[1].AuxInt + v.reset(OpAMD64SHRBconst) + v.AuxInt = c & 31 + v.AddArg(x) + return true + } + // match: (SHRB x (MOVBconst [c])) + // cond: + // result: (SHRBconst [c&31] x) + for { + x := v.Args[0] + if v.Args[1].Op != OpAMD64MOVBconst { + break + } + c := v.Args[1].AuxInt + v.reset(OpAMD64SHRBconst) + v.AuxInt = c & 31 + v.AddArg(x) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64SHRL(v *Value, config 
*Config) bool { + b := v.Block + _ = b + // match: (SHRL x (MOVQconst [c])) + // cond: + // result: (SHRLconst [c&31] x) + for { + x := v.Args[0] + if v.Args[1].Op != OpAMD64MOVQconst { + break + } + c := v.Args[1].AuxInt + v.reset(OpAMD64SHRLconst) + v.AuxInt = c & 31 + v.AddArg(x) + return true + } + // match: (SHRL x (MOVLconst [c])) + // cond: + // result: (SHRLconst [c&31] x) + for { + x := v.Args[0] + if v.Args[1].Op != OpAMD64MOVLconst { + break + } + c := v.Args[1].AuxInt + v.reset(OpAMD64SHRLconst) + v.AuxInt = c & 31 + v.AddArg(x) + return true + } + // match: (SHRL x (MOVWconst [c])) + // cond: + // result: (SHRLconst [c&31] x) + for { + x := v.Args[0] + if v.Args[1].Op != OpAMD64MOVWconst { + break + } + c := v.Args[1].AuxInt + v.reset(OpAMD64SHRLconst) + v.AuxInt = c & 31 + v.AddArg(x) + return true + } + // match: (SHRL x (MOVBconst [c])) + // cond: + // result: (SHRLconst [c&31] x) + for { + x := v.Args[0] + if v.Args[1].Op != OpAMD64MOVBconst { + break + } + c := v.Args[1].AuxInt + v.reset(OpAMD64SHRLconst) + v.AuxInt = c & 31 + v.AddArg(x) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64SHRQ(v *Value, config *Config) bool { + b := v.Block + _ = b + // match: (SHRQ x (MOVQconst [c])) + // cond: + // result: (SHRQconst [c&63] x) + for { + x := v.Args[0] + if v.Args[1].Op != OpAMD64MOVQconst { + break + } + c := v.Args[1].AuxInt + v.reset(OpAMD64SHRQconst) + v.AuxInt = c & 63 + v.AddArg(x) + return true + } + // match: (SHRQ x (MOVLconst [c])) + // cond: + // result: (SHRQconst [c&63] x) + for { + x := v.Args[0] + if v.Args[1].Op != OpAMD64MOVLconst { + break + } + c := v.Args[1].AuxInt + v.reset(OpAMD64SHRQconst) + v.AuxInt = c & 63 + v.AddArg(x) + return true + } + // match: (SHRQ x (MOVWconst [c])) + // cond: + // result: (SHRQconst [c&63] x) + for { + x := v.Args[0] + if v.Args[1].Op != OpAMD64MOVWconst { + break + } + c := v.Args[1].AuxInt + v.reset(OpAMD64SHRQconst) + v.AuxInt = c & 63 + v.AddArg(x) + return true + } + // match: (SHRQ x (MOVBconst [c])) + // cond: + // result: (SHRQconst [c&63] x) + for { + x := v.Args[0] + if v.Args[1].Op != OpAMD64MOVBconst { + break + } + c := v.Args[1].AuxInt + v.reset(OpAMD64SHRQconst) + v.AuxInt = c & 63 + v.AddArg(x) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64SHRW(v *Value, config *Config) bool { + b := v.Block + _ = b + // match: (SHRW x (MOVQconst [c])) + // cond: + // result: (SHRWconst [c&31] x) + for { + x := v.Args[0] + if v.Args[1].Op != OpAMD64MOVQconst { + break + } + c := v.Args[1].AuxInt + v.reset(OpAMD64SHRWconst) + v.AuxInt = c & 31 + v.AddArg(x) + return true + } + // match: (SHRW x (MOVLconst [c])) + // cond: + // result: (SHRWconst [c&31] x) + for { + x := v.Args[0] + if v.Args[1].Op != OpAMD64MOVLconst { + break + } + c := v.Args[1].AuxInt + v.reset(OpAMD64SHRWconst) + v.AuxInt = c & 31 + v.AddArg(x) + return true + } + // match: (SHRW x (MOVWconst [c])) + // cond: + // result: (SHRWconst [c&31] x) + for { + x := v.Args[0] + if v.Args[1].Op != OpAMD64MOVWconst { + break + } + c := v.Args[1].AuxInt + v.reset(OpAMD64SHRWconst) + v.AuxInt = c & 31 + v.AddArg(x) + return true + } + // match: (SHRW x (MOVBconst [c])) + // cond: + // result: (SHRWconst [c&31] x) + for { + x := v.Args[0] + if v.Args[1].Op != OpAMD64MOVBconst { + break + } + c := v.Args[1].AuxInt + v.reset(OpAMD64SHRWconst) + v.AuxInt = c & 31 + v.AddArg(x) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64SUBB(v *Value, config *Config) bool { + b := v.Block + _ = b + // match: (SUBB x 
(MOVBconst [c])) + // cond: + // result: (SUBBconst x [c]) + for { + x := v.Args[0] + if v.Args[1].Op != OpAMD64MOVBconst { + break + } + c := v.Args[1].AuxInt + v.reset(OpAMD64SUBBconst) + v.AddArg(x) + v.AuxInt = c + return true + } + // match: (SUBB (MOVBconst [c]) x) + // cond: + // result: (NEGB (SUBBconst <v.Type> x [c])) + for { + if v.Args[0].Op != OpAMD64MOVBconst { + break + } + c := v.Args[0].AuxInt + x := v.Args[1] + v.reset(OpAMD64NEGB) + v0 := b.NewValue0(v.Line, OpAMD64SUBBconst, v.Type) + v0.AddArg(x) + v0.AuxInt = c + v.AddArg(v0) + return true + } + // match: (SUBB x x) + // cond: + // result: (MOVBconst [0]) + for { + x := v.Args[0] + if v.Args[1] != x { + break + } + v.reset(OpAMD64MOVBconst) + v.AuxInt = 0 + return true + } + return false +} +func rewriteValueAMD64_OpAMD64SUBBconst(v *Value, config *Config) bool { + b := v.Block + _ = b + // match: (SUBBconst [c] x) + // cond: int8(c) == 0 + // result: x + for { + c := v.AuxInt + x := v.Args[0] + if !(int8(c) == 0) { + break + } + v.reset(OpCopy) + v.Type = x.Type + v.AddArg(x) + return true + } + // match: (SUBBconst [c] (MOVBconst [d])) + // cond: + // result: (MOVBconst [d-c]) + for { + c := v.AuxInt + if v.Args[0].Op != OpAMD64MOVBconst { + break + } + d := v.Args[0].AuxInt + v.reset(OpAMD64MOVBconst) + v.AuxInt = d - c + return true + } + // match: (SUBBconst [c] (SUBBconst [d] x)) + // cond: + // result: (ADDBconst [-c-d] x) + for { + c := v.AuxInt + if v.Args[0].Op != OpAMD64SUBBconst { + break + } + d := v.Args[0].AuxInt + x := v.Args[0].Args[0] + v.reset(OpAMD64ADDBconst) + v.AuxInt = -c - d + v.AddArg(x) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64SUBL(v *Value, config *Config) bool { + b := v.Block + _ = b + // match: (SUBL x (MOVLconst [c])) + // cond: + // result: (SUBLconst x [c]) + for { + x := v.Args[0] + if v.Args[1].Op != OpAMD64MOVLconst { + break + } + c := v.Args[1].AuxInt + v.reset(OpAMD64SUBLconst) + v.AddArg(x) + v.AuxInt = c + return true + } + // match: (SUBL (MOVLconst [c]) x) + // cond: + // result: (NEGL (SUBLconst <v.Type> x [c])) + for { + if v.Args[0].Op != OpAMD64MOVLconst { + break + } + c := v.Args[0].AuxInt + x := v.Args[1] + v.reset(OpAMD64NEGL) + v0 := b.NewValue0(v.Line, OpAMD64SUBLconst, v.Type) + v0.AddArg(x) + v0.AuxInt = c + v.AddArg(v0) + return true + } + // match: (SUBL x x) + // cond: + // result: (MOVLconst [0]) + for { + x := v.Args[0] + if v.Args[1] != x { + break + } + v.reset(OpAMD64MOVLconst) + v.AuxInt = 0 + return true + } + return false +} +func rewriteValueAMD64_OpAMD64SUBLconst(v *Value, config *Config) bool { + b := v.Block + _ = b + // match: (SUBLconst [c] x) + // cond: int32(c) == 0 + // result: x + for { + c := v.AuxInt + x := v.Args[0] + if !(int32(c) == 0) { + break + } + v.reset(OpCopy) + v.Type = x.Type + v.AddArg(x) + return true + } + // match: (SUBLconst [c] (MOVLconst [d])) + // cond: + // result: (MOVLconst [d-c]) + for { + c := v.AuxInt + if v.Args[0].Op != OpAMD64MOVLconst { + break + } + d := v.Args[0].AuxInt + v.reset(OpAMD64MOVLconst) + v.AuxInt = d - c + return true + } + // match: (SUBLconst [c] (SUBLconst [d] x)) + // cond: + // result: (ADDLconst [-c-d] x) + for { + c := v.AuxInt + if v.Args[0].Op != OpAMD64SUBLconst { + break + } + d := v.Args[0].AuxInt + x := v.Args[0].Args[0] + v.reset(OpAMD64ADDLconst) + v.AuxInt = -c - d + v.AddArg(x) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64SUBQ(v *Value, config *Config) bool { + b := v.Block + _ = b + // match: (SUBQ x (MOVQconst [c])) + // cond: 
is32Bit(c) + // result: (SUBQconst x [c]) + for { + x := v.Args[0] + if v.Args[1].Op != OpAMD64MOVQconst { + break + } + c := v.Args[1].AuxInt + if !(is32Bit(c)) { + break + } + v.reset(OpAMD64SUBQconst) + v.AddArg(x) + v.AuxInt = c + return true + } + // match: (SUBQ (MOVQconst [c]) x) + // cond: is32Bit(c) + // result: (NEGQ (SUBQconst <v.Type> x [c])) + for { + if v.Args[0].Op != OpAMD64MOVQconst { + break + } + c := v.Args[0].AuxInt + x := v.Args[1] + if !(is32Bit(c)) { + break + } + v.reset(OpAMD64NEGQ) + v0 := b.NewValue0(v.Line, OpAMD64SUBQconst, v.Type) + v0.AddArg(x) + v0.AuxInt = c + v.AddArg(v0) + return true + } + // match: (SUBQ x x) + // cond: + // result: (MOVQconst [0]) + for { + x := v.Args[0] + if v.Args[1] != x { + break + } + v.reset(OpAMD64MOVQconst) + v.AuxInt = 0 + return true + } + return false +} +func rewriteValueAMD64_OpAMD64SUBQconst(v *Value, config *Config) bool { + b := v.Block + _ = b + // match: (SUBQconst [0] x) + // cond: + // result: x + for { + if v.AuxInt != 0 { + break + } + x := v.Args[0] + v.reset(OpCopy) + v.Type = x.Type + v.AddArg(x) + return true + } + // match: (SUBQconst [c] (MOVQconst [d])) + // cond: + // result: (MOVQconst [d-c]) + for { + c := v.AuxInt + if v.Args[0].Op != OpAMD64MOVQconst { + break + } + d := v.Args[0].AuxInt + v.reset(OpAMD64MOVQconst) + v.AuxInt = d - c + return true + } + // match: (SUBQconst [c] (SUBQconst [d] x)) + // cond: + // result: (ADDQconst [-c-d] x) + for { + c := v.AuxInt + if v.Args[0].Op != OpAMD64SUBQconst { + break + } + d := v.Args[0].AuxInt + x := v.Args[0].Args[0] + v.reset(OpAMD64ADDQconst) + v.AuxInt = -c - d + v.AddArg(x) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64SUBW(v *Value, config *Config) bool { + b := v.Block + _ = b + // match: (SUBW x (MOVWconst [c])) + // cond: + // result: (SUBWconst x [c]) + for { + x := v.Args[0] + if v.Args[1].Op != OpAMD64MOVWconst { + break + } + c := v.Args[1].AuxInt + v.reset(OpAMD64SUBWconst) + v.AddArg(x) + v.AuxInt = c + return true + } + // match: (SUBW (MOVWconst [c]) x) + // cond: + // result: (NEGW (SUBWconst <v.Type> x [c])) + for { + if v.Args[0].Op != OpAMD64MOVWconst { + break + } + c := v.Args[0].AuxInt + x := v.Args[1] + v.reset(OpAMD64NEGW) + v0 := b.NewValue0(v.Line, OpAMD64SUBWconst, v.Type) + v0.AddArg(x) + v0.AuxInt = c + v.AddArg(v0) + return true + } + // match: (SUBW x x) + // cond: + // result: (MOVWconst [0]) + for { + x := v.Args[0] + if v.Args[1] != x { + break + } + v.reset(OpAMD64MOVWconst) + v.AuxInt = 0 + return true + } + return false +} +func rewriteValueAMD64_OpAMD64SUBWconst(v *Value, config *Config) bool { + b := v.Block + _ = b + // match: (SUBWconst [c] x) + // cond: int16(c) == 0 + // result: x + for { + c := v.AuxInt + x := v.Args[0] + if !(int16(c) == 0) { + break + } + v.reset(OpCopy) + v.Type = x.Type + v.AddArg(x) + return true + } + // match: (SUBWconst [c] (MOVWconst [d])) + // cond: + // result: (MOVWconst [d-c]) + for { + c := v.AuxInt + if v.Args[0].Op != OpAMD64MOVWconst { + break + } + d := v.Args[0].AuxInt + v.reset(OpAMD64MOVWconst) + v.AuxInt = d - c + return true + } + // match: (SUBWconst [c] (SUBWconst [d] x)) + // cond: + // result: (ADDWconst [-c-d] x) + for { + c := v.AuxInt + if v.Args[0].Op != OpAMD64SUBWconst { + break + } + d := v.Args[0].AuxInt + x := v.Args[0].Args[0] + v.reset(OpAMD64ADDWconst) + v.AuxInt = -c - d + v.AddArg(x) + return true + } + return false +} +func rewriteValueAMD64_OpSignExt16to32(v *Value, config *Config) bool { + b := v.Block + _ = b + // match: 
(SignExt16to32 x) + // cond: + // result: (MOVWQSX x) + for { + x := v.Args[0] + v.reset(OpAMD64MOVWQSX) + v.AddArg(x) + return true + } + return false +} +func rewriteValueAMD64_OpSignExt16to64(v *Value, config *Config) bool { + b := v.Block + _ = b + // match: (SignExt16to64 x) + // cond: + // result: (MOVWQSX x) + for { + x := v.Args[0] + v.reset(OpAMD64MOVWQSX) + v.AddArg(x) + return true + } + return false +} +func rewriteValueAMD64_OpSignExt32to64(v *Value, config *Config) bool { + b := v.Block + _ = b + // match: (SignExt32to64 x) + // cond: + // result: (MOVLQSX x) + for { + x := v.Args[0] + v.reset(OpAMD64MOVLQSX) + v.AddArg(x) + return true + } + return false +} +func rewriteValueAMD64_OpSignExt8to16(v *Value, config *Config) bool { + b := v.Block + _ = b + // match: (SignExt8to16 x) + // cond: + // result: (MOVBQSX x) + for { + x := v.Args[0] + v.reset(OpAMD64MOVBQSX) + v.AddArg(x) + return true + } + return false +} +func rewriteValueAMD64_OpSignExt8to32(v *Value, config *Config) bool { + b := v.Block + _ = b + // match: (SignExt8to32 x) + // cond: + // result: (MOVBQSX x) + for { + x := v.Args[0] + v.reset(OpAMD64MOVBQSX) + v.AddArg(x) + return true + } + return false +} +func rewriteValueAMD64_OpSignExt8to64(v *Value, config *Config) bool { + b := v.Block + _ = b + // match: (SignExt8to64 x) + // cond: + // result: (MOVBQSX x) + for { + x := v.Args[0] + v.reset(OpAMD64MOVBQSX) + v.AddArg(x) + return true + } + return false +} +func rewriteValueAMD64_OpSqrt(v *Value, config *Config) bool { + b := v.Block + _ = b + // match: (Sqrt x) + // cond: + // result: (SQRTSD x) + for { + x := v.Args[0] + v.reset(OpAMD64SQRTSD) + v.AddArg(x) + return true + } + return false +} +func rewriteValueAMD64_OpStaticCall(v *Value, config *Config) bool { + b := v.Block + _ = b + // match: (StaticCall [argwid] {target} mem) + // cond: + // result: (CALLstatic [argwid] {target} mem) + for { + argwid := v.AuxInt + target := v.Aux + mem := v.Args[0] + v.reset(OpAMD64CALLstatic) + v.AuxInt = argwid + v.Aux = target + v.AddArg(mem) + return true + } + return false +} +func rewriteValueAMD64_OpStore(v *Value, config *Config) bool { + b := v.Block + _ = b + // match: (Store [8] ptr val mem) + // cond: is64BitFloat(val.Type) + // result: (MOVSDstore ptr val mem) + for { + if v.AuxInt != 8 { + break + } + ptr := v.Args[0] + val := v.Args[1] + mem := v.Args[2] + if !(is64BitFloat(val.Type)) { + break + } + v.reset(OpAMD64MOVSDstore) + v.AddArg(ptr) + v.AddArg(val) + v.AddArg(mem) + return true + } + // match: (Store [4] ptr val mem) + // cond: is32BitFloat(val.Type) + // result: (MOVSSstore ptr val mem) + for { + if v.AuxInt != 4 { + break + } + ptr := v.Args[0] + val := v.Args[1] + mem := v.Args[2] + if !(is32BitFloat(val.Type)) { + break + } + v.reset(OpAMD64MOVSSstore) + v.AddArg(ptr) + v.AddArg(val) + v.AddArg(mem) + return true + } + // match: (Store [8] ptr val mem) + // cond: + // result: (MOVQstore ptr val mem) + for { + if v.AuxInt != 8 { + break + } + ptr := v.Args[0] + val := v.Args[1] + mem := v.Args[2] + v.reset(OpAMD64MOVQstore) + v.AddArg(ptr) + v.AddArg(val) + v.AddArg(mem) + return true + } + // match: (Store [4] ptr val mem) + // cond: + // result: (MOVLstore ptr val mem) + for { + if v.AuxInt != 4 { + break + } + ptr := v.Args[0] + val := v.Args[1] + mem := v.Args[2] + v.reset(OpAMD64MOVLstore) + v.AddArg(ptr) + v.AddArg(val) + v.AddArg(mem) + return true + } + // match: (Store [2] ptr val mem) + // cond: + // result: (MOVWstore ptr val mem) + for { + if v.AuxInt != 2 { + break + } + ptr 
:= v.Args[0] + val := v.Args[1] + mem := v.Args[2] + v.reset(OpAMD64MOVWstore) + v.AddArg(ptr) + v.AddArg(val) + v.AddArg(mem) + return true + } + // match: (Store [1] ptr val mem) + // cond: + // result: (MOVBstore ptr val mem) + for { + if v.AuxInt != 1 { + break + } + ptr := v.Args[0] + val := v.Args[1] + mem := v.Args[2] + v.reset(OpAMD64MOVBstore) + v.AddArg(ptr) + v.AddArg(val) + v.AddArg(mem) + return true + } + return false +} +func rewriteValueAMD64_OpSub16(v *Value, config *Config) bool { + b := v.Block + _ = b + // match: (Sub16 x y) + // cond: + // result: (SUBW x y) + for { + x := v.Args[0] + y := v.Args[1] + v.reset(OpAMD64SUBW) + v.AddArg(x) + v.AddArg(y) + return true + } + return false +} +func rewriteValueAMD64_OpSub32(v *Value, config *Config) bool { + b := v.Block + _ = b + // match: (Sub32 x y) + // cond: + // result: (SUBL x y) + for { + x := v.Args[0] + y := v.Args[1] + v.reset(OpAMD64SUBL) + v.AddArg(x) + v.AddArg(y) + return true + } + return false +} +func rewriteValueAMD64_OpSub32F(v *Value, config *Config) bool { + b := v.Block + _ = b + // match: (Sub32F x y) + // cond: + // result: (SUBSS x y) + for { + x := v.Args[0] + y := v.Args[1] + v.reset(OpAMD64SUBSS) + v.AddArg(x) + v.AddArg(y) + return true + } + return false +} +func rewriteValueAMD64_OpSub64(v *Value, config *Config) bool { + b := v.Block + _ = b + // match: (Sub64 x y) + // cond: + // result: (SUBQ x y) + for { + x := v.Args[0] + y := v.Args[1] + v.reset(OpAMD64SUBQ) + v.AddArg(x) + v.AddArg(y) + return true + } + return false +} +func rewriteValueAMD64_OpSub64F(v *Value, config *Config) bool { + b := v.Block + _ = b + // match: (Sub64F x y) + // cond: + // result: (SUBSD x y) + for { + x := v.Args[0] + y := v.Args[1] + v.reset(OpAMD64SUBSD) + v.AddArg(x) + v.AddArg(y) + return true + } + return false +} +func rewriteValueAMD64_OpSub8(v *Value, config *Config) bool { + b := v.Block + _ = b + // match: (Sub8 x y) + // cond: + // result: (SUBB x y) + for { + x := v.Args[0] + y := v.Args[1] + v.reset(OpAMD64SUBB) + v.AddArg(x) + v.AddArg(y) + return true + } + return false +} +func rewriteValueAMD64_OpSubPtr(v *Value, config *Config) bool { + b := v.Block + _ = b + // match: (SubPtr x y) + // cond: + // result: (SUBQ x y) + for { + x := v.Args[0] + y := v.Args[1] + v.reset(OpAMD64SUBQ) + v.AddArg(x) + v.AddArg(y) + return true + } + return false +} +func rewriteValueAMD64_OpTrunc16to8(v *Value, config *Config) bool { + b := v.Block + _ = b + // match: (Trunc16to8 x) + // cond: + // result: x + for { + x := v.Args[0] + v.reset(OpCopy) + v.Type = x.Type + v.AddArg(x) + return true + } + return false +} +func rewriteValueAMD64_OpTrunc32to16(v *Value, config *Config) bool { + b := v.Block + _ = b + // match: (Trunc32to16 x) + // cond: + // result: x + for { + x := v.Args[0] + v.reset(OpCopy) + v.Type = x.Type + v.AddArg(x) + return true + } + return false +} +func rewriteValueAMD64_OpTrunc32to8(v *Value, config *Config) bool { + b := v.Block + _ = b + // match: (Trunc32to8 x) + // cond: + // result: x + for { + x := v.Args[0] + v.reset(OpCopy) + v.Type = x.Type + v.AddArg(x) + return true + } + return false +} +func rewriteValueAMD64_OpTrunc64to16(v *Value, config *Config) bool { + b := v.Block + _ = b + // match: (Trunc64to16 x) + // cond: + // result: x + for { + x := v.Args[0] + v.reset(OpCopy) + v.Type = x.Type + v.AddArg(x) + return true + } + return false +} +func rewriteValueAMD64_OpTrunc64to32(v *Value, config *Config) bool { + b := v.Block + _ = b + // match: (Trunc64to32 x) + // cond: + // 
result: x + for { + x := v.Args[0] + v.reset(OpCopy) + v.Type = x.Type + v.AddArg(x) + return true + } + return false +} +func rewriteValueAMD64_OpTrunc64to8(v *Value, config *Config) bool { + b := v.Block + _ = b + // match: (Trunc64to8 x) + // cond: + // result: x + for { + x := v.Args[0] + v.reset(OpCopy) + v.Type = x.Type + v.AddArg(x) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64XORB(v *Value, config *Config) bool { + b := v.Block + _ = b + // match: (XORB x (MOVBconst [c])) + // cond: + // result: (XORBconst [c] x) + for { + x := v.Args[0] + if v.Args[1].Op != OpAMD64MOVBconst { + break + } + c := v.Args[1].AuxInt + v.reset(OpAMD64XORBconst) + v.AuxInt = c + v.AddArg(x) + return true + } + // match: (XORB (MOVBconst [c]) x) + // cond: + // result: (XORBconst [c] x) + for { + if v.Args[0].Op != OpAMD64MOVBconst { + break + } + c := v.Args[0].AuxInt + x := v.Args[1] + v.reset(OpAMD64XORBconst) + v.AuxInt = c + v.AddArg(x) + return true + } + // match: (XORB x x) + // cond: + // result: (MOVBconst [0]) + for { + x := v.Args[0] + if v.Args[1] != x { + break + } + v.reset(OpAMD64MOVBconst) + v.AuxInt = 0 + return true + } + return false +} +func rewriteValueAMD64_OpAMD64XORBconst(v *Value, config *Config) bool { + b := v.Block + _ = b + // match: (XORBconst [c] x) + // cond: int8(c)==0 + // result: x + for { + c := v.AuxInt + x := v.Args[0] + if !(int8(c) == 0) { + break + } + v.reset(OpCopy) + v.Type = x.Type + v.AddArg(x) + return true + } + // match: (XORBconst [c] (MOVBconst [d])) + // cond: + // result: (MOVBconst [c^d]) + for { + c := v.AuxInt + if v.Args[0].Op != OpAMD64MOVBconst { + break + } + d := v.Args[0].AuxInt + v.reset(OpAMD64MOVBconst) + v.AuxInt = c ^ d + return true + } + return false +} +func rewriteValueAMD64_OpAMD64XORL(v *Value, config *Config) bool { + b := v.Block + _ = b + // match: (XORL x (MOVLconst [c])) + // cond: + // result: (XORLconst [c] x) + for { + x := v.Args[0] + if v.Args[1].Op != OpAMD64MOVLconst { + break + } + c := v.Args[1].AuxInt + v.reset(OpAMD64XORLconst) + v.AuxInt = c + v.AddArg(x) + return true + } + // match: (XORL (MOVLconst [c]) x) + // cond: + // result: (XORLconst [c] x) + for { + if v.Args[0].Op != OpAMD64MOVLconst { + break + } + c := v.Args[0].AuxInt + x := v.Args[1] + v.reset(OpAMD64XORLconst) + v.AuxInt = c + v.AddArg(x) + return true + } + // match: (XORL x x) + // cond: + // result: (MOVLconst [0]) + for { + x := v.Args[0] + if v.Args[1] != x { + break + } + v.reset(OpAMD64MOVLconst) + v.AuxInt = 0 + return true + } + return false +} +func rewriteValueAMD64_OpAMD64XORLconst(v *Value, config *Config) bool { + b := v.Block + _ = b + // match: (XORLconst [c] x) + // cond: int32(c)==0 + // result: x + for { + c := v.AuxInt + x := v.Args[0] + if !(int32(c) == 0) { + break + } + v.reset(OpCopy) + v.Type = x.Type + v.AddArg(x) + return true + } + // match: (XORLconst [c] (MOVLconst [d])) + // cond: + // result: (MOVLconst [c^d]) + for { + c := v.AuxInt + if v.Args[0].Op != OpAMD64MOVLconst { + break + } + d := v.Args[0].AuxInt + v.reset(OpAMD64MOVLconst) + v.AuxInt = c ^ d + return true + } + return false +} +func rewriteValueAMD64_OpAMD64XORQ(v *Value, config *Config) bool { + b := v.Block + _ = b + // match: (XORQ x (MOVQconst [c])) + // cond: is32Bit(c) + // result: (XORQconst [c] x) + for { + x := v.Args[0] + if v.Args[1].Op != OpAMD64MOVQconst { + break + } + c := v.Args[1].AuxInt + if !(is32Bit(c)) { + break + } + v.reset(OpAMD64XORQconst) + v.AuxInt = c + v.AddArg(x) + return true + } + // match: (XORQ 
(MOVQconst [c]) x) + // cond: is32Bit(c) + // result: (XORQconst [c] x) + for { + if v.Args[0].Op != OpAMD64MOVQconst { + break + } + c := v.Args[0].AuxInt + x := v.Args[1] + if !(is32Bit(c)) { + break + } + v.reset(OpAMD64XORQconst) + v.AuxInt = c + v.AddArg(x) + return true + } + // match: (XORQ x x) + // cond: + // result: (MOVQconst [0]) + for { + x := v.Args[0] + if v.Args[1] != x { + break + } + v.reset(OpAMD64MOVQconst) + v.AuxInt = 0 + return true + } + return false +} +func rewriteValueAMD64_OpAMD64XORQconst(v *Value, config *Config) bool { + b := v.Block + _ = b + // match: (XORQconst [0] x) + // cond: + // result: x + for { + if v.AuxInt != 0 { + break + } + x := v.Args[0] + v.reset(OpCopy) + v.Type = x.Type + v.AddArg(x) + return true + } + // match: (XORQconst [c] (MOVQconst [d])) + // cond: + // result: (MOVQconst [c^d]) + for { + c := v.AuxInt + if v.Args[0].Op != OpAMD64MOVQconst { + break + } + d := v.Args[0].AuxInt + v.reset(OpAMD64MOVQconst) + v.AuxInt = c ^ d + return true + } + return false +} +func rewriteValueAMD64_OpAMD64XORW(v *Value, config *Config) bool { + b := v.Block + _ = b + // match: (XORW x (MOVWconst [c])) + // cond: + // result: (XORWconst [c] x) + for { + x := v.Args[0] + if v.Args[1].Op != OpAMD64MOVWconst { + break + } + c := v.Args[1].AuxInt + v.reset(OpAMD64XORWconst) + v.AuxInt = c + v.AddArg(x) + return true + } + // match: (XORW (MOVWconst [c]) x) + // cond: + // result: (XORWconst [c] x) + for { + if v.Args[0].Op != OpAMD64MOVWconst { + break + } + c := v.Args[0].AuxInt + x := v.Args[1] + v.reset(OpAMD64XORWconst) + v.AuxInt = c + v.AddArg(x) + return true + } + // match: (XORW x x) + // cond: + // result: (MOVWconst [0]) + for { + x := v.Args[0] + if v.Args[1] != x { + break + } + v.reset(OpAMD64MOVWconst) + v.AuxInt = 0 + return true + } + return false +} +func rewriteValueAMD64_OpAMD64XORWconst(v *Value, config *Config) bool { + b := v.Block + _ = b + // match: (XORWconst [c] x) + // cond: int16(c)==0 + // result: x + for { + c := v.AuxInt + x := v.Args[0] + if !(int16(c) == 0) { + break + } + v.reset(OpCopy) + v.Type = x.Type + v.AddArg(x) + return true + } + // match: (XORWconst [c] (MOVWconst [d])) + // cond: + // result: (MOVWconst [c^d]) + for { + c := v.AuxInt + if v.Args[0].Op != OpAMD64MOVWconst { + break + } + d := v.Args[0].AuxInt + v.reset(OpAMD64MOVWconst) + v.AuxInt = c ^ d + return true + } + return false +} +func rewriteValueAMD64_OpXor16(v *Value, config *Config) bool { + b := v.Block + _ = b + // match: (Xor16 x y) + // cond: + // result: (XORW x y) + for { + x := v.Args[0] + y := v.Args[1] + v.reset(OpAMD64XORW) + v.AddArg(x) + v.AddArg(y) + return true + } + return false +} +func rewriteValueAMD64_OpXor32(v *Value, config *Config) bool { + b := v.Block + _ = b + // match: (Xor32 x y) + // cond: + // result: (XORL x y) + for { + x := v.Args[0] + y := v.Args[1] + v.reset(OpAMD64XORL) + v.AddArg(x) + v.AddArg(y) + return true + } + return false +} +func rewriteValueAMD64_OpXor64(v *Value, config *Config) bool { + b := v.Block + _ = b + // match: (Xor64 x y) + // cond: + // result: (XORQ x y) + for { + x := v.Args[0] + y := v.Args[1] + v.reset(OpAMD64XORQ) + v.AddArg(x) + v.AddArg(y) + return true + } + return false +} +func rewriteValueAMD64_OpXor8(v *Value, config *Config) bool { + b := v.Block + _ = b + // match: (Xor8 x y) + // cond: + // result: (XORB x y) + for { + x := v.Args[0] + y := v.Args[1] + v.reset(OpAMD64XORB) + v.AddArg(x) + v.AddArg(y) + return true + } + return false +} +func rewriteValueAMD64_OpZero(v *Value, 
config *Config) bool { + b := v.Block + _ = b + // match: (Zero [0] _ mem) + // cond: + // result: mem + for { + if v.AuxInt != 0 { + break + } + mem := v.Args[1] + v.reset(OpCopy) + v.Type = mem.Type + v.AddArg(mem) + return true + } + // match: (Zero [1] destptr mem) + // cond: + // result: (MOVBstoreconst [0] destptr mem) + for { + if v.AuxInt != 1 { + break + } + destptr := v.Args[0] + mem := v.Args[1] + v.reset(OpAMD64MOVBstoreconst) + v.AuxInt = 0 + v.AddArg(destptr) + v.AddArg(mem) + return true + } + // match: (Zero [2] destptr mem) + // cond: + // result: (MOVWstoreconst [0] destptr mem) + for { + if v.AuxInt != 2 { + break + } + destptr := v.Args[0] + mem := v.Args[1] + v.reset(OpAMD64MOVWstoreconst) + v.AuxInt = 0 + v.AddArg(destptr) + v.AddArg(mem) + return true + } + // match: (Zero [4] destptr mem) + // cond: + // result: (MOVLstoreconst [0] destptr mem) + for { + if v.AuxInt != 4 { + break + } + destptr := v.Args[0] + mem := v.Args[1] + v.reset(OpAMD64MOVLstoreconst) + v.AuxInt = 0 + v.AddArg(destptr) + v.AddArg(mem) + return true + } + // match: (Zero [8] destptr mem) + // cond: + // result: (MOVQstoreconst [0] destptr mem) + for { + if v.AuxInt != 8 { + break + } + destptr := v.Args[0] + mem := v.Args[1] + v.reset(OpAMD64MOVQstoreconst) + v.AuxInt = 0 + v.AddArg(destptr) + v.AddArg(mem) + return true + } + // match: (Zero [3] destptr mem) + // cond: + // result: (MOVBstoreconst [makeValAndOff(0,2)] destptr (MOVWstoreconst [0] destptr mem)) + for { + if v.AuxInt != 3 { + break + } + destptr := v.Args[0] + mem := v.Args[1] + v.reset(OpAMD64MOVBstoreconst) + v.AuxInt = makeValAndOff(0, 2) + v.AddArg(destptr) + v0 := b.NewValue0(v.Line, OpAMD64MOVWstoreconst, TypeMem) + v0.AuxInt = 0 + v0.AddArg(destptr) + v0.AddArg(mem) + v.AddArg(v0) + return true + } + // match: (Zero [5] destptr mem) + // cond: + // result: (MOVBstoreconst [makeValAndOff(0,4)] destptr (MOVLstoreconst [0] destptr mem)) + for { + if v.AuxInt != 5 { + break + } + destptr := v.Args[0] + mem := v.Args[1] + v.reset(OpAMD64MOVBstoreconst) + v.AuxInt = makeValAndOff(0, 4) + v.AddArg(destptr) + v0 := b.NewValue0(v.Line, OpAMD64MOVLstoreconst, TypeMem) + v0.AuxInt = 0 + v0.AddArg(destptr) + v0.AddArg(mem) + v.AddArg(v0) + return true + } + // match: (Zero [6] destptr mem) + // cond: + // result: (MOVWstoreconst [makeValAndOff(0,4)] destptr (MOVLstoreconst [0] destptr mem)) + for { + if v.AuxInt != 6 { + break + } + destptr := v.Args[0] + mem := v.Args[1] + v.reset(OpAMD64MOVWstoreconst) + v.AuxInt = makeValAndOff(0, 4) + v.AddArg(destptr) + v0 := b.NewValue0(v.Line, OpAMD64MOVLstoreconst, TypeMem) + v0.AuxInt = 0 + v0.AddArg(destptr) + v0.AddArg(mem) + v.AddArg(v0) + return true + } + // match: (Zero [7] destptr mem) + // cond: + // result: (MOVLstoreconst [makeValAndOff(0,3)] destptr (MOVLstoreconst [0] destptr mem)) + for { + if v.AuxInt != 7 { + break + } + destptr := v.Args[0] + mem := v.Args[1] + v.reset(OpAMD64MOVLstoreconst) + v.AuxInt = makeValAndOff(0, 3) + v.AddArg(destptr) + v0 := b.NewValue0(v.Line, OpAMD64MOVLstoreconst, TypeMem) + v0.AuxInt = 0 + v0.AddArg(destptr) + v0.AddArg(mem) + v.AddArg(v0) + return true + } + // match: (Zero [size] destptr mem) + // cond: size%8 != 0 && size > 8 + // result: (Zero [size-size%8] (ADDQconst destptr [size%8]) (MOVQstoreconst [0] destptr mem)) + for { + size := v.AuxInt + destptr := v.Args[0] + mem := v.Args[1] + if !(size%8 != 0 && size > 8) { + break + } + v.reset(OpZero) + v.AuxInt = size - size%8 + v0 := b.NewValue0(v.Line, OpAMD64ADDQconst, 
config.fe.TypeUInt64()) + v0.AddArg(destptr) + v0.AuxInt = size % 8 + v.AddArg(v0) + v1 := b.NewValue0(v.Line, OpAMD64MOVQstoreconst, TypeMem) + v1.AuxInt = 0 + v1.AddArg(destptr) + v1.AddArg(mem) + v.AddArg(v1) + return true + } + // match: (Zero [16] destptr mem) + // cond: + // result: (MOVQstoreconst [makeValAndOff(0,8)] destptr (MOVQstoreconst [0] destptr mem)) + for { + if v.AuxInt != 16 { + break + } + destptr := v.Args[0] + mem := v.Args[1] + v.reset(OpAMD64MOVQstoreconst) + v.AuxInt = makeValAndOff(0, 8) + v.AddArg(destptr) + v0 := b.NewValue0(v.Line, OpAMD64MOVQstoreconst, TypeMem) + v0.AuxInt = 0 + v0.AddArg(destptr) + v0.AddArg(mem) + v.AddArg(v0) + return true + } + // match: (Zero [24] destptr mem) + // cond: + // result: (MOVQstoreconst [makeValAndOff(0,16)] destptr (MOVQstoreconst [makeValAndOff(0,8)] destptr (MOVQstoreconst [0] destptr mem))) + for { + if v.AuxInt != 24 { + break + } + destptr := v.Args[0] + mem := v.Args[1] + v.reset(OpAMD64MOVQstoreconst) + v.AuxInt = makeValAndOff(0, 16) + v.AddArg(destptr) + v0 := b.NewValue0(v.Line, OpAMD64MOVQstoreconst, TypeMem) + v0.AuxInt = makeValAndOff(0, 8) + v0.AddArg(destptr) + v1 := b.NewValue0(v.Line, OpAMD64MOVQstoreconst, TypeMem) + v1.AuxInt = 0 + v1.AddArg(destptr) + v1.AddArg(mem) + v0.AddArg(v1) + v.AddArg(v0) + return true + } + // match: (Zero [32] destptr mem) + // cond: + // result: (MOVQstoreconst [makeValAndOff(0,24)] destptr (MOVQstoreconst [makeValAndOff(0,16)] destptr (MOVQstoreconst [makeValAndOff(0,8)] destptr (MOVQstoreconst [0] destptr mem)))) + for { + if v.AuxInt != 32 { + break + } + destptr := v.Args[0] + mem := v.Args[1] + v.reset(OpAMD64MOVQstoreconst) + v.AuxInt = makeValAndOff(0, 24) + v.AddArg(destptr) + v0 := b.NewValue0(v.Line, OpAMD64MOVQstoreconst, TypeMem) + v0.AuxInt = makeValAndOff(0, 16) + v0.AddArg(destptr) + v1 := b.NewValue0(v.Line, OpAMD64MOVQstoreconst, TypeMem) + v1.AuxInt = makeValAndOff(0, 8) + v1.AddArg(destptr) + v2 := b.NewValue0(v.Line, OpAMD64MOVQstoreconst, TypeMem) + v2.AuxInt = 0 + v2.AddArg(destptr) + v2.AddArg(mem) + v1.AddArg(v2) + v0.AddArg(v1) + v.AddArg(v0) + return true + } + // match: (Zero [size] destptr mem) + // cond: size <= 1024 && size%8 == 0 && size%16 != 0 + // result: (Zero [size-8] (ADDQconst [8] destptr) (MOVQstore destptr (MOVQconst [0]) mem)) + for { + size := v.AuxInt + destptr := v.Args[0] + mem := v.Args[1] + if !(size <= 1024 && size%8 == 0 && size%16 != 0) { + break + } + v.reset(OpZero) + v.AuxInt = size - 8 + v0 := b.NewValue0(v.Line, OpAMD64ADDQconst, config.fe.TypeUInt64()) + v0.AuxInt = 8 + v0.AddArg(destptr) + v.AddArg(v0) + v1 := b.NewValue0(v.Line, OpAMD64MOVQstore, TypeMem) + v1.AddArg(destptr) + v2 := b.NewValue0(v.Line, OpAMD64MOVQconst, config.fe.TypeUInt64()) + v2.AuxInt = 0 + v1.AddArg(v2) + v1.AddArg(mem) + v.AddArg(v1) + return true + } + // match: (Zero [size] destptr mem) + // cond: size <= 1024 && size%16 == 0 + // result: (DUFFZERO [duffStart(size)] (ADDQconst [duffAdj(size)] destptr) (MOVOconst [0]) mem) + for { + size := v.AuxInt + destptr := v.Args[0] + mem := v.Args[1] + if !(size <= 1024 && size%16 == 0) { + break + } + v.reset(OpAMD64DUFFZERO) + v.AuxInt = duffStart(size) + v0 := b.NewValue0(v.Line, OpAMD64ADDQconst, config.fe.TypeUInt64()) + v0.AuxInt = duffAdj(size) + v0.AddArg(destptr) + v.AddArg(v0) + v1 := b.NewValue0(v.Line, OpAMD64MOVOconst, TypeInt128) + v1.AuxInt = 0 + v.AddArg(v1) + v.AddArg(mem) + return true + } + // match: (Zero [size] destptr mem) + // cond: size > 1024 && size%8 == 0 + // result: (REPSTOSQ 
destptr (MOVQconst [size/8]) (MOVQconst [0]) mem)
+	for {
+		size := v.AuxInt
+		destptr := v.Args[0]
+		mem := v.Args[1]
+		if !(size > 1024 && size%8 == 0) {
+			break
+		}
+		v.reset(OpAMD64REPSTOSQ)
+		v.AddArg(destptr)
+		v0 := b.NewValue0(v.Line, OpAMD64MOVQconst, config.fe.TypeUInt64())
+		v0.AuxInt = size / 8
+		v.AddArg(v0)
+		v1 := b.NewValue0(v.Line, OpAMD64MOVQconst, config.fe.TypeUInt64())
+		v1.AuxInt = 0
+		v.AddArg(v1)
+		v.AddArg(mem)
+		return true
+	}
+	return false
+}
+func rewriteValueAMD64_OpZeroExt16to32(v *Value, config *Config) bool {
+	b := v.Block
+	_ = b
+	// match: (ZeroExt16to32 x)
+	// cond:
+	// result: (MOVWQZX x)
+	for {
+		x := v.Args[0]
+		v.reset(OpAMD64MOVWQZX)
+		v.AddArg(x)
+		return true
+	}
+	return false
+}
+func rewriteValueAMD64_OpZeroExt16to64(v *Value, config *Config) bool {
+	b := v.Block
+	_ = b
+	// match: (ZeroExt16to64 x)
+	// cond:
+	// result: (MOVWQZX x)
+	for {
+		x := v.Args[0]
+		v.reset(OpAMD64MOVWQZX)
+		v.AddArg(x)
+		return true
+	}
+	return false
+}
+func rewriteValueAMD64_OpZeroExt32to64(v *Value, config *Config) bool {
+	b := v.Block
+	_ = b
+	// match: (ZeroExt32to64 x)
+	// cond:
+	// result: (MOVLQZX x)
+	for {
+		x := v.Args[0]
+		v.reset(OpAMD64MOVLQZX)
+		v.AddArg(x)
+		return true
+	}
+	return false
+}
+func rewriteValueAMD64_OpZeroExt8to16(v *Value, config *Config) bool {
+	b := v.Block
+	_ = b
+	// match: (ZeroExt8to16 x)
+	// cond:
+	// result: (MOVBQZX x)
+	for {
+		x := v.Args[0]
+		v.reset(OpAMD64MOVBQZX)
+		v.AddArg(x)
+		return true
+	}
+	return false
+}
+func rewriteValueAMD64_OpZeroExt8to32(v *Value, config *Config) bool {
+	b := v.Block
+	_ = b
+	// match: (ZeroExt8to32 x)
+	// cond:
+	// result: (MOVBQZX x)
+	for {
+		x := v.Args[0]
+		v.reset(OpAMD64MOVBQZX)
+		v.AddArg(x)
+		return true
+	}
+	return false
+}
+func rewriteValueAMD64_OpZeroExt8to64(v *Value, config *Config) bool {
+	b := v.Block
+	_ = b
+	// match: (ZeroExt8to64 x)
+	// cond:
+	// result: (MOVBQZX x)
+	for {
+		x := v.Args[0]
+		v.reset(OpAMD64MOVBQZX)
+		v.AddArg(x)
+		return true
+	}
+	return false
+}
+func rewriteBlockAMD64(b *Block) bool {
+	switch b.Kind {
+	case BlockAMD64EQ:
+		// match: (EQ (InvertFlags cmp) yes no)
+		// cond:
+		// result: (EQ cmp yes no)
+		for {
+			v := b.Control
+			if v.Op != OpAMD64InvertFlags {
+				break
+			}
+			cmp := v.Args[0]
+			yes := b.Succs[0]
+			no := b.Succs[1]
+			b.Kind = BlockAMD64EQ
+			b.Control = cmp
+			b.Succs[0] = yes
+			b.Succs[1] = no
+			return true
+		}
+		// match: (EQ (FlagEQ) yes no)
+		// cond:
+		// result: (First nil yes no)
+		for {
+			v := b.Control
+			if v.Op != OpAMD64FlagEQ {
+				break
+			}
+			yes := b.Succs[0]
+			no := b.Succs[1]
+			b.Kind = BlockFirst
+			b.Control = nil
+			b.Succs[0] = yes
+			b.Succs[1] = no
+			return true
+		}
+		// match: (EQ (FlagLT_ULT) yes no)
+		// cond:
+		// result: (First nil no yes)
+		for {
+			v := b.Control
+			if v.Op != OpAMD64FlagLT_ULT {
+				break
+			}
+			yes := b.Succs[0]
+			no := b.Succs[1]
+			b.Kind = BlockFirst
+			b.Control = nil
+			b.Succs[0] = no
+			b.Succs[1] = yes
+			b.Likely *= -1
+			return true
+		}
+		// match: (EQ (FlagLT_UGT) yes no)
+		// cond:
+		// result: (First nil no yes)
+		for {
+			v := b.Control
+			if v.Op != OpAMD64FlagLT_UGT {
+				break
+			}
+			yes := b.Succs[0]
+			no := b.Succs[1]
+			b.Kind = BlockFirst
+			b.Control = nil
+			b.Succs[0] = no
+			b.Succs[1] = yes
+			b.Likely *= -1
+			return true
+		}
+		// match: (EQ (FlagGT_ULT) yes no)
+		// cond:
+		// result: (First nil no yes)
+		for {
+			v := b.Control
+			if v.Op != OpAMD64FlagGT_ULT {
+				break
+			}
+			yes := b.Succs[0]
+			no := b.Succs[1]
+			b.Kind = BlockFirst
+			b.Control = nil
+			b.Succs[0] = no
+			b.Succs[1] = yes
+			b.Likely *= -1
+			return true
+		}
+		// match: (EQ (FlagGT_UGT) yes no)
+		// cond:
+		// result: (First nil no yes)
+		for {
+			v := b.Control
+			if v.Op != OpAMD64FlagGT_UGT {
+				break
+			}
+			yes := b.Succs[0]
+			no := b.Succs[1]
+			b.Kind = BlockFirst
+			b.Control = nil
+			b.Succs[0] = no
+			b.Succs[1] = yes
+			b.Likely *= -1
+			return true
+		}
+	case BlockAMD64GE:
+		// match: (GE (InvertFlags cmp) yes no)
+		// cond:
+		// result: (LE cmp yes no)
+		for {
+			v := b.Control
+			if v.Op != OpAMD64InvertFlags {
+				break
+			}
+			cmp := v.Args[0]
+			yes := b.Succs[0]
+			no := b.Succs[1]
+			b.Kind = BlockAMD64LE
+			b.Control = cmp
+			b.Succs[0] = yes
+			b.Succs[1] = no
+			return true
+		}
+		// match: (GE (FlagEQ) yes no)
+		// cond:
+		// result: (First nil yes no)
+		for {
+			v := b.Control
+			if v.Op != OpAMD64FlagEQ {
+				break
+			}
+			yes := b.Succs[0]
+			no := b.Succs[1]
+			b.Kind = BlockFirst
+			b.Control = nil
+			b.Succs[0] = yes
+			b.Succs[1] = no
+			return true
+		}
+		// match: (GE (FlagLT_ULT) yes no)
+		// cond:
+		// result: (First nil no yes)
+		for {
+			v := b.Control
+			if v.Op != OpAMD64FlagLT_ULT {
+				break
+			}
+			yes := b.Succs[0]
+			no := b.Succs[1]
+			b.Kind = BlockFirst
+			b.Control = nil
+			b.Succs[0] = no
+			b.Succs[1] = yes
+			b.Likely *= -1
+			return true
+		}
+		// match: (GE (FlagLT_UGT) yes no)
+		// cond:
+		// result: (First nil no yes)
+		for {
+			v := b.Control
+			if v.Op != OpAMD64FlagLT_UGT {
+				break
+			}
+			yes := b.Succs[0]
+			no := b.Succs[1]
+			b.Kind = BlockFirst
+			b.Control = nil
+			b.Succs[0] = no
+			b.Succs[1] = yes
+			b.Likely *= -1
+			return true
+		}
+		// match: (GE (FlagGT_ULT) yes no)
+		// cond:
+		// result: (First nil yes no)
+		for {
+			v := b.Control
+			if v.Op != OpAMD64FlagGT_ULT {
+				break
+			}
+			yes := b.Succs[0]
+			no := b.Succs[1]
+			b.Kind = BlockFirst
+			b.Control = nil
+			b.Succs[0] = yes
+			b.Succs[1] = no
+			return true
+		}
+		// match: (GE (FlagGT_UGT) yes no)
+		// cond:
+		// result: (First nil yes no)
+		for {
+			v := b.Control
+			if v.Op != OpAMD64FlagGT_UGT {
+				break
+			}
+			yes := b.Succs[0]
+			no := b.Succs[1]
+			b.Kind = BlockFirst
+			b.Control = nil
+			b.Succs[0] = yes
+			b.Succs[1] = no
+			return true
+		}
+	case BlockAMD64GT:
+		// match: (GT (InvertFlags cmp) yes no)
+		// cond:
+		// result: (LT cmp yes no)
+		for {
+			v := b.Control
+			if v.Op != OpAMD64InvertFlags {
+				break
+			}
+			cmp := v.Args[0]
+			yes := b.Succs[0]
+			no := b.Succs[1]
+			b.Kind = BlockAMD64LT
+			b.Control = cmp
+			b.Succs[0] = yes
+			b.Succs[1] = no
+			return true
+		}
+		// match: (GT (FlagEQ) yes no)
+		// cond:
+		// result: (First nil no yes)
+		for {
+			v := b.Control
+			if v.Op != OpAMD64FlagEQ {
+				break
+			}
+			yes := b.Succs[0]
+			no := b.Succs[1]
+			b.Kind = BlockFirst
+			b.Control = nil
+			b.Succs[0] = no
+			b.Succs[1] = yes
+			b.Likely *= -1
+			return true
+		}
+		// match: (GT (FlagLT_ULT) yes no)
+		// cond:
+		// result: (First nil no yes)
+		for {
+			v := b.Control
+			if v.Op != OpAMD64FlagLT_ULT {
+				break
+			}
+			yes := b.Succs[0]
+			no := b.Succs[1]
+			b.Kind = BlockFirst
+			b.Control = nil
+			b.Succs[0] = no
+			b.Succs[1] = yes
+			b.Likely *= -1
+			return true
+		}
+		// match: (GT (FlagLT_UGT) yes no)
+		// cond:
+		// result: (First nil no yes)
+		for {
+			v := b.Control
+			if v.Op != OpAMD64FlagLT_UGT {
+				break
+			}
+			yes := b.Succs[0]
+			no := b.Succs[1]
+			b.Kind = BlockFirst
+			b.Control = nil
+			b.Succs[0] = no
+			b.Succs[1] = yes
+			b.Likely *= -1
+			return true
+		}
+		// match: (GT (FlagGT_ULT) yes no)
+		// cond:
+		// result: (First nil yes no)
+		for {
+			v := b.Control
+			if v.Op != OpAMD64FlagGT_ULT {
+				break
+			}
+			yes := b.Succs[0]
+			no := b.Succs[1]
+			b.Kind = BlockFirst
+			b.Control = nil
+			b.Succs[0] = yes
+			b.Succs[1] = no
+			return true
+		}
+		// match: (GT (FlagGT_UGT) yes no)
+		// cond:
+		// result: (First nil yes no)
+		for {
+			v := b.Control
+			if v.Op != OpAMD64FlagGT_UGT {
+				break
+			}
+			yes := b.Succs[0]
+			no := b.Succs[1]
+			b.Kind = BlockFirst
+			b.Control = nil
+			b.Succs[0] = yes
+			b.Succs[1] = no
+			return true
+		}
+	case BlockIf:
+		// match: (If (SETL cmp) yes no)
+		// cond:
+		// result: (LT cmp yes no)
+		for {
+			v := b.Control
+			if v.Op != OpAMD64SETL {
+				break
+			}
+			cmp := v.Args[0]
+			yes := b.Succs[0]
+			no := b.Succs[1]
+			b.Kind = BlockAMD64LT
+			b.Control = cmp
+			b.Succs[0] = yes
+			b.Succs[1] = no
+			return true
+		}
+		// match: (If (SETLE cmp) yes no)
+		// cond:
+		// result: (LE cmp yes no)
+		for {
+			v := b.Control
+			if v.Op != OpAMD64SETLE {
+				break
+			}
+			cmp := v.Args[0]
+			yes := b.Succs[0]
+			no := b.Succs[1]
+			b.Kind = BlockAMD64LE
+			b.Control = cmp
+			b.Succs[0] = yes
+			b.Succs[1] = no
+			return true
+		}
+		// match: (If (SETG cmp) yes no)
+		// cond:
+		// result: (GT cmp yes no)
+		for {
+			v := b.Control
+			if v.Op != OpAMD64SETG {
+				break
+			}
+			cmp := v.Args[0]
+			yes := b.Succs[0]
+			no := b.Succs[1]
+			b.Kind = BlockAMD64GT
+			b.Control = cmp
+			b.Succs[0] = yes
+			b.Succs[1] = no
+			return true
+		}
+		// match: (If (SETGE cmp) yes no)
+		// cond:
+		// result: (GE cmp yes no)
+		for {
+			v := b.Control
+			if v.Op != OpAMD64SETGE {
+				break
+			}
+			cmp := v.Args[0]
+			yes := b.Succs[0]
+			no := b.Succs[1]
+			b.Kind = BlockAMD64GE
+			b.Control = cmp
+			b.Succs[0] = yes
+			b.Succs[1] = no
+			return true
+		}
+		// match: (If (SETEQ cmp) yes no)
+		// cond:
+		// result: (EQ cmp yes no)
+		for {
+			v := b.Control
+			if v.Op != OpAMD64SETEQ {
+				break
+			}
+			cmp := v.Args[0]
+			yes := b.Succs[0]
+			no := b.Succs[1]
+			b.Kind = BlockAMD64EQ
+			b.Control = cmp
+			b.Succs[0] = yes
+			b.Succs[1] = no
+			return true
+		}
+		// match: (If (SETNE cmp) yes no)
+		// cond:
+		// result: (NE cmp yes no)
+		for {
+			v := b.Control
+			if v.Op != OpAMD64SETNE {
+				break
+			}
+			cmp := v.Args[0]
+			yes := b.Succs[0]
+			no := b.Succs[1]
+			b.Kind = BlockAMD64NE
+			b.Control = cmp
+			b.Succs[0] = yes
+			b.Succs[1] = no
+			return true
+		}
+		// match: (If (SETB cmp) yes no)
+		// cond:
+		// result: (ULT cmp yes no)
+		for {
+			v := b.Control
+			if v.Op != OpAMD64SETB {
+				break
+			}
+			cmp := v.Args[0]
+			yes := b.Succs[0]
+			no := b.Succs[1]
+			b.Kind = BlockAMD64ULT
+			b.Control = cmp
+			b.Succs[0] = yes
+			b.Succs[1] = no
+			return true
+		}
+		// match: (If (SETBE cmp) yes no)
+		// cond:
+		// result: (ULE cmp yes no)
+		for {
+			v := b.Control
+			if v.Op != OpAMD64SETBE {
+				break
+			}
+			cmp := v.Args[0]
+			yes := b.Succs[0]
+			no := b.Succs[1]
+			b.Kind = BlockAMD64ULE
+			b.Control = cmp
+			b.Succs[0] = yes
+			b.Succs[1] = no
+			return true
+		}
+		// match: (If (SETA cmp) yes no)
+		// cond:
+		// result: (UGT cmp yes no)
+		for {
+			v := b.Control
+			if v.Op != OpAMD64SETA {
+				break
+			}
+			cmp := v.Args[0]
+			yes := b.Succs[0]
+			no := b.Succs[1]
+			b.Kind = BlockAMD64UGT
+			b.Control = cmp
+			b.Succs[0] = yes
+			b.Succs[1] = no
+			return true
+		}
+		// match: (If (SETAE cmp) yes no)
+		// cond:
+		// result: (UGE cmp yes no)
+		for {
+			v := b.Control
+			if v.Op != OpAMD64SETAE {
+				break
+			}
+			cmp := v.Args[0]
+			yes := b.Succs[0]
+			no := b.Succs[1]
+			b.Kind = BlockAMD64UGE
+			b.Control = cmp
+			b.Succs[0] = yes
+			b.Succs[1] = no
+			return true
+		}
+		// match: (If (SETGF cmp) yes no)
+		// cond:
+		// result: (UGT cmp yes no)
+		for {
+			v := b.Control
+			if v.Op != OpAMD64SETGF {
+				break
+			}
+			cmp := v.Args[0]
+			yes := b.Succs[0]
+			no := b.Succs[1]
+			b.Kind = BlockAMD64UGT
+			b.Control = cmp
+			b.Succs[0] = yes
+			b.Succs[1] = no
+			return true
+		}
+		// match: (If (SETGEF cmp) yes no)
+		// cond:
+		// result: (UGE cmp yes no)
+		for {
+			v := b.Control
+			if v.Op != OpAMD64SETGEF {
+				break
+			}
+			cmp := v.Args[0]
+			yes := b.Succs[0]
+			no := b.Succs[1]
+			b.Kind = BlockAMD64UGE
+			b.Control = cmp
+			b.Succs[0] = yes
+			b.Succs[1] = no
+			return true
+		}
+		// match: (If (SETEQF cmp) yes no)
+		// cond:
+		// result: (EQF cmp yes no)
+		for {
+			v := b.Control
+			if v.Op != OpAMD64SETEQF {
+				break
+			}
+			cmp := v.Args[0]
+			yes := b.Succs[0]
+			no := b.Succs[1]
+			b.Kind = BlockAMD64EQF
+			b.Control = cmp
+			b.Succs[0] = yes
+			b.Succs[1] = no
+			return true
+		}
+		// match: (If (SETNEF cmp) yes no)
+		// cond:
+		// result: (NEF cmp yes no)
+		for {
+			v := b.Control
+			if v.Op != OpAMD64SETNEF {
+				break
+			}
+			cmp := v.Args[0]
+			yes := b.Succs[0]
+			no := b.Succs[1]
+			b.Kind = BlockAMD64NEF
+			b.Control = cmp
+			b.Succs[0] = yes
+			b.Succs[1] = no
+			return true
+		}
+		// match: (If cond yes no)
+		// cond:
+		// result: (NE (TESTB cond cond) yes no)
+		for {
+			v := b.Control
+			cond := v
+			yes := b.Succs[0]
+			no := b.Succs[1]
+			b.Kind = BlockAMD64NE
+			v0 := b.NewValue0(v.Line, OpAMD64TESTB, TypeFlags)
+			v0.AddArg(cond)
+			v0.AddArg(cond)
+			b.Control = v0
+			b.Succs[0] = yes
+			b.Succs[1] = no
+			return true
+		}
+	case BlockAMD64LE:
+		// match: (LE (InvertFlags cmp) yes no)
+		// cond:
+		// result: (GE cmp yes no)
+		for {
+			v := b.Control
+			if v.Op != OpAMD64InvertFlags {
+				break
+			}
+			cmp := v.Args[0]
+			yes := b.Succs[0]
+			no := b.Succs[1]
+			b.Kind = BlockAMD64GE
+			b.Control = cmp
+			b.Succs[0] = yes
+			b.Succs[1] = no
+			return true
+		}
+		// match: (LE (FlagEQ) yes no)
+		// cond:
+		// result: (First nil yes no)
+		for {
+			v := b.Control
+			if v.Op != OpAMD64FlagEQ {
+				break
+			}
+			yes := b.Succs[0]
+			no := b.Succs[1]
+			b.Kind = BlockFirst
+			b.Control = nil
+			b.Succs[0] = yes
+			b.Succs[1] = no
+			return true
+		}
+		// match: (LE (FlagLT_ULT) yes no)
+		// cond:
+		// result: (First nil yes no)
+		for {
+			v := b.Control
+			if v.Op != OpAMD64FlagLT_ULT {
+				break
+			}
+			yes := b.Succs[0]
+			no := b.Succs[1]
+			b.Kind = BlockFirst
+			b.Control = nil
+			b.Succs[0] = yes
+			b.Succs[1] = no
+			return true
+		}
+		// match: (LE (FlagLT_UGT) yes no)
+		// cond:
+		// result: (First nil yes no)
+		for {
+			v := b.Control
+			if v.Op != OpAMD64FlagLT_UGT {
+				break
+			}
+			yes := b.Succs[0]
+			no := b.Succs[1]
+			b.Kind = BlockFirst
+			b.Control = nil
+			b.Succs[0] = yes
+			b.Succs[1] = no
+			return true
+		}
+		// match: (LE (FlagGT_ULT) yes no)
+		// cond:
+		// result: (First nil no yes)
+		for {
+			v := b.Control
+			if v.Op != OpAMD64FlagGT_ULT {
+				break
+			}
+			yes := b.Succs[0]
+			no := b.Succs[1]
+			b.Kind = BlockFirst
+			b.Control = nil
+			b.Succs[0] = no
+			b.Succs[1] = yes
+			b.Likely *= -1
+			return true
+		}
+		// match: (LE (FlagGT_UGT) yes no)
+		// cond:
+		// result: (First nil no yes)
+		for {
+			v := b.Control
+			if v.Op != OpAMD64FlagGT_UGT {
+				break
+			}
+			yes := b.Succs[0]
+			no := b.Succs[1]
+			b.Kind = BlockFirst
+			b.Control = nil
+			b.Succs[0] = no
+			b.Succs[1] = yes
+			b.Likely *= -1
+			return true
+		}
+	case BlockAMD64LT:
+		// match: (LT (InvertFlags cmp) yes no)
+		// cond:
+		// result: (GT cmp yes no)
+		for {
+			v := b.Control
+			if v.Op != OpAMD64InvertFlags {
+				break
+			}
+			cmp := v.Args[0]
+			yes := b.Succs[0]
+			no := b.Succs[1]
+			b.Kind = BlockAMD64GT
+			b.Control = cmp
+			b.Succs[0] = yes
+			b.Succs[1] = no
+			return true
+		}
+		// match: (LT (FlagEQ) yes no)
+		// cond:
+		// result: (First nil no yes)
+		for {
+			v := b.Control
+			if v.Op != OpAMD64FlagEQ {
+				break
+			}
+			yes := b.Succs[0]
+			no := b.Succs[1]
+			b.Kind = BlockFirst
+			b.Control = nil
+			b.Succs[0] = no
+			b.Succs[1] = yes
+			b.Likely *= -1
+			return true
+		}
+		// match: (LT (FlagLT_ULT) yes no)
+		// cond:
+		// result: (First nil yes no)
+		for {
+			v := b.Control
+			if v.Op != OpAMD64FlagLT_ULT {
+				break
+			}
+			yes := b.Succs[0]
+			no := b.Succs[1]
+			b.Kind = BlockFirst
+			b.Control = nil
+			b.Succs[0] = yes
+			b.Succs[1] = no
+			return true
+		}
+		// match: (LT (FlagLT_UGT) yes no)
+		// cond:
+		// result: (First nil yes no)
+		for {
+			v := b.Control
+			if v.Op != OpAMD64FlagLT_UGT {
+				break
+			}
+			yes := b.Succs[0]
+			no := b.Succs[1]
+			b.Kind = BlockFirst
+			b.Control = nil
+			b.Succs[0] = yes
+			b.Succs[1] = no
+			return true
+		}
+		// match: (LT (FlagGT_ULT) yes no)
+		// cond:
+		// result: (First nil no yes)
+		for {
+			v := b.Control
+			if v.Op != OpAMD64FlagGT_ULT {
+				break
+			}
+			yes := b.Succs[0]
+			no := b.Succs[1]
+			b.Kind = BlockFirst
+			b.Control = nil
+			b.Succs[0] = no
+			b.Succs[1] = yes
+			b.Likely *= -1
+			return true
+		}
+		// match: (LT (FlagGT_UGT) yes no)
+		// cond:
+		// result: (First nil no yes)
+		for {
+			v := b.Control
+			if v.Op != OpAMD64FlagGT_UGT {
+				break
+			}
+			yes := b.Succs[0]
+			no := b.Succs[1]
+			b.Kind = BlockFirst
+			b.Control = nil
+			b.Succs[0] = no
+			b.Succs[1] = yes
+			b.Likely *= -1
+			return true
+		}
+	case BlockAMD64NE:
+		// match: (NE (TESTB (SETL cmp)) yes no)
+		// cond:
+		// result: (LT cmp yes no)
+		for {
+			v := b.Control
+			if v.Op != OpAMD64TESTB {
+				break
+			}
+			if v.Args[0].Op != OpAMD64SETL {
+				break
+			}
+			cmp := v.Args[0].Args[0]
+			yes := b.Succs[0]
+			no := b.Succs[1]
+			b.Kind = BlockAMD64LT
+			b.Control = cmp
+			b.Succs[0] = yes
+			b.Succs[1] = no
+			return true
+		}
+		// match: (NE (TESTB (SETLE cmp)) yes no)
+		// cond:
+		// result: (LE cmp yes no)
+		for {
+			v := b.Control
+			if v.Op != OpAMD64TESTB {
+				break
+			}
+			if v.Args[0].Op != OpAMD64SETLE {
+				break
+			}
+			cmp := v.Args[0].Args[0]
+			yes := b.Succs[0]
+			no := b.Succs[1]
+			b.Kind = BlockAMD64LE
+			b.Control = cmp
+			b.Succs[0] = yes
+			b.Succs[1] = no
+			return true
+		}
+		// match: (NE (TESTB (SETG cmp)) yes no)
+		// cond:
+		// result: (GT cmp yes no)
+		for {
+			v := b.Control
+			if v.Op != OpAMD64TESTB {
+				break
+			}
+			if v.Args[0].Op != OpAMD64SETG {
+				break
+			}
+			cmp := v.Args[0].Args[0]
+			yes := b.Succs[0]
+			no := b.Succs[1]
+			b.Kind = BlockAMD64GT
+			b.Control = cmp
+			b.Succs[0] = yes
+			b.Succs[1] = no
+			return true
+		}
+		// match: (NE (TESTB (SETGE cmp)) yes no)
+		// cond:
+		// result: (GE cmp yes no)
+		for {
+			v := b.Control
+			if v.Op != OpAMD64TESTB {
+				break
+			}
+			if v.Args[0].Op != OpAMD64SETGE {
+				break
+			}
+			cmp := v.Args[0].Args[0]
+			yes := b.Succs[0]
+			no := b.Succs[1]
+			b.Kind = BlockAMD64GE
+			b.Control = cmp
+			b.Succs[0] = yes
+			b.Succs[1] = no
+			return true
+		}
+		// match: (NE (TESTB (SETEQ cmp)) yes no)
+		// cond:
+		// result: (EQ cmp yes no)
+		for {
+			v := b.Control
+			if v.Op != OpAMD64TESTB {
+				break
+			}
+			if v.Args[0].Op != OpAMD64SETEQ {
+				break
+			}
+			cmp := v.Args[0].Args[0]
+			yes := b.Succs[0]
+			no := b.Succs[1]
+			b.Kind = BlockAMD64EQ
+			b.Control = cmp
+			b.Succs[0] = yes
+			b.Succs[1] = no
+			return true
+		}
+		// match: (NE (TESTB (SETNE cmp)) yes no)
+		// cond:
+		// result: (NE cmp yes no)
+		for {
+			v := b.Control
+			if v.Op != OpAMD64TESTB {
+				break
+			}
+			if v.Args[0].Op != OpAMD64SETNE {
+				break
+			}
+			cmp := v.Args[0].Args[0]
+			yes := b.Succs[0]
+			no := b.Succs[1]
+			b.Kind = BlockAMD64NE
+			b.Control = cmp
+			b.Succs[0] = yes
+			b.Succs[1] = no
+			return true
+		}
+		// match: (NE (TESTB (SETB cmp)) yes no)
+		// cond:
+		// result: (ULT cmp yes no)
+		for {
+			v := b.Control
+			if v.Op != OpAMD64TESTB {
+				break
+			}
+			if v.Args[0].Op != OpAMD64SETB {
+				break
+			}
+			cmp := v.Args[0].Args[0]
+			yes := b.Succs[0]
+			no := b.Succs[1]
+			b.Kind = BlockAMD64ULT
+			b.Control = cmp
+			b.Succs[0] = yes
+			b.Succs[1] = no
+			return true
+		}
+		// match: (NE (TESTB (SETBE cmp)) yes no)
+		// cond:
+		// result: (ULE cmp yes no)
+		for {
+			v := b.Control
+			if v.Op != OpAMD64TESTB {
+				break
+			}
+			if v.Args[0].Op != OpAMD64SETBE {
+				break
+			}
+			cmp := v.Args[0].Args[0]
+			yes := b.Succs[0]
+			no := b.Succs[1]
+			b.Kind = BlockAMD64ULE
+			b.Control = cmp
+			b.Succs[0] = yes
+			b.Succs[1] = no
+			return true
+		}
+		// match: (NE (TESTB (SETA cmp)) yes no)
+		// cond:
+		// result: (UGT cmp yes no)
+		for {
+			v := b.Control
+			if v.Op != OpAMD64TESTB {
+				break
+			}
+			if v.Args[0].Op != OpAMD64SETA {
+				break
+			}
+			cmp := v.Args[0].Args[0]
+			yes := b.Succs[0]
+			no := b.Succs[1]
+			b.Kind = BlockAMD64UGT
+			b.Control = cmp
+			b.Succs[0] = yes
+			b.Succs[1] = no
+			return true
+		}
+		// match: (NE (TESTB (SETAE cmp)) yes no)
+		// cond:
+		// result: (UGE cmp yes no)
+		for {
+			v := b.Control
+			if v.Op != OpAMD64TESTB {
+				break
+			}
+			if v.Args[0].Op != OpAMD64SETAE {
+				break
+			}
+			cmp := v.Args[0].Args[0]
+			yes := b.Succs[0]
+			no := b.Succs[1]
+			b.Kind = BlockAMD64UGE
+			b.Control = cmp
+			b.Succs[0] = yes
+			b.Succs[1] = no
+			return true
+		}
+		// match: (NE (TESTB (SETGF cmp)) yes no)
+		// cond:
+		// result: (UGT cmp yes no)
+		for {
+			v := b.Control
+			if v.Op != OpAMD64TESTB {
+				break
+			}
+			if v.Args[0].Op != OpAMD64SETGF {
+				break
+			}
+			cmp := v.Args[0].Args[0]
+			yes := b.Succs[0]
+			no := b.Succs[1]
+			b.Kind = BlockAMD64UGT
+			b.Control = cmp
+			b.Succs[0] = yes
+			b.Succs[1] = no
+			return true
+		}
+		// match: (NE (TESTB (SETGEF cmp)) yes no)
+		// cond:
+		// result: (UGE cmp yes no)
+		for {
+			v := b.Control
+			if v.Op != OpAMD64TESTB {
+				break
+			}
+			if v.Args[0].Op != OpAMD64SETGEF {
+				break
+			}
+			cmp := v.Args[0].Args[0]
+			yes := b.Succs[0]
+			no := b.Succs[1]
+			b.Kind = BlockAMD64UGE
+			b.Control = cmp
+			b.Succs[0] = yes
+			b.Succs[1] = no
+			return true
+		}
+		// match: (NE (TESTB (SETEQF cmp)) yes no)
+		// cond:
+		// result: (EQF cmp yes no)
+		for {
+			v := b.Control
+			if v.Op != OpAMD64TESTB {
+				break
+			}
+			if v.Args[0].Op != OpAMD64SETEQF {
+				break
+			}
+			cmp := v.Args[0].Args[0]
+			yes := b.Succs[0]
+			no := b.Succs[1]
+			b.Kind = BlockAMD64EQF
+			b.Control = cmp
+			b.Succs[0] = yes
+			b.Succs[1] = no
+			return true
+		}
+		// match: (NE (TESTB (SETNEF cmp)) yes no)
+		// cond:
+		// result: (NEF cmp yes no)
+		for {
+			v := b.Control
+			if v.Op != OpAMD64TESTB {
+				break
+			}
+			if v.Args[0].Op != OpAMD64SETNEF {
+				break
+			}
+			cmp := v.Args[0].Args[0]
+			yes := b.Succs[0]
+			no := b.Succs[1]
+			b.Kind = BlockAMD64NEF
+			b.Control = cmp
+			b.Succs[0] = yes
+			b.Succs[1] = no
+			return true
+		}
+		// match: (NE (InvertFlags cmp) yes no)
+		// cond:
+		// result: (NE cmp yes no)
+		for {
+			v := b.Control
+			if v.Op != OpAMD64InvertFlags {
+				break
+			}
+			cmp := v.Args[0]
+			yes := b.Succs[0]
+			no := b.Succs[1]
+			b.Kind = BlockAMD64NE
+			b.Control = cmp
+			b.Succs[0] = yes
+			b.Succs[1] = no
+			return true
+		}
+		// match: (NE (FlagEQ) yes no)
+		// cond:
+		// result: (First nil no yes)
+		for {
+			v := b.Control
+			if v.Op != OpAMD64FlagEQ {
+				break
+			}
+			yes := b.Succs[0]
+			no := b.Succs[1]
+			b.Kind = BlockFirst
+			b.Control = nil
+			b.Succs[0] = no
+			b.Succs[1] = yes
+			b.Likely *= -1
+			return true
+		}
+		// match: (NE (FlagLT_ULT) yes no)
+		// cond:
+		// result: (First nil yes no)
+		for {
+			v := b.Control
+			if v.Op != OpAMD64FlagLT_ULT {
+				break
+			}
+			yes := b.Succs[0]
+			no := b.Succs[1]
+			b.Kind = BlockFirst
+			b.Control = nil
+			b.Succs[0] = yes
+			b.Succs[1] = no
+			return true
+		}
+		// match: (NE (FlagLT_UGT) yes no)
+		// cond:
+		// result: (First nil yes no)
+		for {
+			v := b.Control
+			if v.Op != OpAMD64FlagLT_UGT {
+				break
+			}
+			yes := b.Succs[0]
+			no := b.Succs[1]
+			b.Kind = BlockFirst
+			b.Control = nil
+			b.Succs[0] = yes
+			b.Succs[1] = no
+			return true
+		}
+		// match: (NE (FlagGT_ULT) yes no)
+		// cond:
+		// result: (First nil yes no)
+		for {
+			v := b.Control
+			if v.Op != OpAMD64FlagGT_ULT {
+				break
+			}
+			yes := b.Succs[0]
+			no := b.Succs[1]
+			b.Kind = BlockFirst
+			b.Control = nil
+			b.Succs[0] = yes
+			b.Succs[1] = no
+			return true
+		}
+		// match: (NE (FlagGT_UGT) yes no)
+		// cond:
+		// result: (First nil yes no)
+		for {
+			v := b.Control
+			if v.Op != OpAMD64FlagGT_UGT {
+				break
+			}
+			yes := b.Succs[0]
+			no := b.Succs[1]
+			b.Kind = BlockFirst
+			b.Control = nil
+			b.Succs[0] = yes
+			b.Succs[1] = no
+			return true
+		}
+	case BlockAMD64UGE:
+		// match: (UGE (InvertFlags cmp) yes no)
+		// cond:
+		// result: (ULE cmp yes no)
+		for {
+			v := b.Control
+			if v.Op != OpAMD64InvertFlags {
+				break
+			}
+			cmp := v.Args[0]
+			yes := b.Succs[0]
+			no := b.Succs[1]
+			b.Kind = BlockAMD64ULE
+			b.Control = cmp
+			b.Succs[0] = yes
+			b.Succs[1] = no
+			return true
+		}
+		// match: (UGE (FlagEQ) yes no)
+		// cond:
+		// result: (First nil yes no)
+		for {
+			v := b.Control
+			if v.Op != OpAMD64FlagEQ {
+				break
+			}
+			yes := b.Succs[0]
+			no := b.Succs[1]
+			b.Kind = BlockFirst
+			b.Control = nil
+			b.Succs[0] = yes
+			b.Succs[1] = no
+			return true
+		}
+		// match: (UGE (FlagLT_ULT) yes no)
+		// cond:
+		// result: (First nil no yes)
+		for {
+			v := b.Control
+			if v.Op != OpAMD64FlagLT_ULT {
+				break
+			}
+			yes := b.Succs[0]
+			no := b.Succs[1]
+			b.Kind = BlockFirst
+			b.Control = nil
+			b.Succs[0] = no
+			b.Succs[1] = yes
+			b.Likely *= -1
+			return true
+		}
+		// match: (UGE (FlagLT_UGT) yes no)
+		// cond:
+		// result: (First nil yes no)
+		for {
+			v := b.Control
+			if v.Op != OpAMD64FlagLT_UGT {
+				break
+			}
+			yes := b.Succs[0]
+			no := b.Succs[1]
+			b.Kind = BlockFirst
+			b.Control = nil
+			b.Succs[0] = yes
+			b.Succs[1] = no
+			return true
+		}
+		// match: (UGE (FlagGT_ULT) yes no)
+		// cond:
+		// result: (First nil no yes)
+		for {
+			v := b.Control
+			if v.Op != OpAMD64FlagGT_ULT {
+				break
+			}
+			yes := b.Succs[0]
+			no := b.Succs[1]
+			b.Kind = BlockFirst
+			b.Control = nil
+			b.Succs[0] = no
+			b.Succs[1] = yes
+			b.Likely *= -1
+			return true
+		}
+		// match: (UGE (FlagGT_UGT) yes no)
+		// cond:
+		// result: (First nil yes no)
+		for {
+			v := b.Control
+			if v.Op != OpAMD64FlagGT_UGT {
+				break
+			}
+			yes := b.Succs[0]
+			no := b.Succs[1]
+			b.Kind = BlockFirst
+			b.Control = nil
+			b.Succs[0] = yes
+			b.Succs[1] = no
+			return true
+		}
+	case BlockAMD64UGT:
+		// match: (UGT (InvertFlags cmp) yes no)
+		// cond:
+		// result: (ULT cmp yes no)
+		for {
+			v := b.Control
+			if v.Op != OpAMD64InvertFlags {
+				break
+			}
+			cmp := v.Args[0]
+			yes := b.Succs[0]
+			no := b.Succs[1]
+			b.Kind = BlockAMD64ULT
+			b.Control = cmp
+			b.Succs[0] = yes
+			b.Succs[1] = no
+			return true
+		}
+		// match: (UGT (FlagEQ) yes no)
+		// cond:
+		// result: (First nil no yes)
+		for {
+			v := b.Control
+			if v.Op != OpAMD64FlagEQ {
+				break
+			}
+			yes := b.Succs[0]
+			no := b.Succs[1]
+			b.Kind = BlockFirst
+			b.Control = nil
+			b.Succs[0] = no
+			b.Succs[1] = yes
+			b.Likely *= -1
+			return true
+		}
+		// match: (UGT (FlagLT_ULT) yes no)
+		// cond:
+		// result: (First nil no yes)
+		for {
+			v := b.Control
+			if v.Op != OpAMD64FlagLT_ULT {
+				break
+			}
+			yes := b.Succs[0]
+			no := b.Succs[1]
+			b.Kind = BlockFirst
+			b.Control = nil
+			b.Succs[0] = no
+			b.Succs[1] = yes
+			b.Likely *= -1
+			return true
+		}
+		// match: (UGT (FlagLT_UGT) yes no)
+		// cond:
+		// result: (First nil yes no)
+		for {
+			v := b.Control
+			if v.Op != OpAMD64FlagLT_UGT {
+				break
+			}
+			yes := b.Succs[0]
+			no := b.Succs[1]
+			b.Kind = BlockFirst
+			b.Control = nil
+			b.Succs[0] = yes
+			b.Succs[1] = no
+			return true
+		}
+		// match: (UGT (FlagGT_ULT) yes no)
+		// cond:
+		// result: (First nil no yes)
+		for {
+			v := b.Control
+			if v.Op != OpAMD64FlagGT_ULT {
+				break
+			}
+			yes := b.Succs[0]
+			no := b.Succs[1]
+			b.Kind = BlockFirst
+			b.Control = nil
+			b.Succs[0] = no
+			b.Succs[1] = yes
+			b.Likely *= -1
+			return true
+		}
+		// match: (UGT (FlagGT_UGT) yes no)
+		// cond:
+		// result: (First nil yes no)
+		for {
+			v := b.Control
+			if v.Op != OpAMD64FlagGT_UGT {
+				break
+			}
+			yes := b.Succs[0]
+			no := b.Succs[1]
+			b.Kind = BlockFirst
+			b.Control = nil
+			b.Succs[0] = yes
+			b.Succs[1] = no
+			return true
+		}
+	case BlockAMD64ULE:
+		// match: (ULE (InvertFlags cmp) yes no)
+		// cond:
+		// result: (UGE cmp yes no)
+		for {
+			v := b.Control
+			if v.Op != OpAMD64InvertFlags {
+				break
+			}
+			cmp := v.Args[0]
+			yes := b.Succs[0]
+			no := b.Succs[1]
+			b.Kind = BlockAMD64UGE
+			b.Control = cmp
+			b.Succs[0] = yes
+			b.Succs[1] = no
+			return true
+		}
+		// match: (ULE (FlagEQ) yes no)
+		// cond:
+		// result: (First nil yes no)
+		for {
+			v := b.Control
+			if v.Op != OpAMD64FlagEQ {
+				break
+			}
+			yes := b.Succs[0]
+			no := b.Succs[1]
+			b.Kind = BlockFirst
+			b.Control = nil
+			b.Succs[0] = yes
+			b.Succs[1] = no
+			return true
+		}
+		// match: (ULE (FlagLT_ULT) yes no)
+		// cond:
+		// result: (First nil yes no)
+		for {
+			v := b.Control
+			if v.Op != OpAMD64FlagLT_ULT {
+				break
+			}
+			yes := b.Succs[0]
+			no := b.Succs[1]
+			b.Kind = BlockFirst
+			b.Control = nil
+			b.Succs[0] = yes
+			b.Succs[1] = no
+			return true
+		}
+		// match: (ULE (FlagLT_UGT) yes no)
+		// cond:
+		// result: (First nil no yes)
+		for {
+			v := b.Control
+			if v.Op != OpAMD64FlagLT_UGT {
+				break
+			}
+			yes := b.Succs[0]
+			no := b.Succs[1]
+			b.Kind = BlockFirst
+			b.Control = nil
+			b.Succs[0] = no
+			b.Succs[1] = yes
+			b.Likely *= -1
+			return true
+		}
+		// match: (ULE (FlagGT_ULT) yes no)
+		// cond:
+		// result: (First nil yes no)
+		for {
+			v := b.Control
+			if v.Op != OpAMD64FlagGT_ULT {
+				break
+			}
+			yes := b.Succs[0]
+			no := b.Succs[1]
+			b.Kind = BlockFirst
+			b.Control = nil
+			b.Succs[0] = yes
+			b.Succs[1] = no
+			return true
+		}
+		// match: (ULE (FlagGT_UGT) yes no)
+		// cond:
+		// result: (First nil no yes)
+		for {
+			v := b.Control
+			if v.Op != OpAMD64FlagGT_UGT {
+				break
+			}
+			yes := b.Succs[0]
+			no := b.Succs[1]
+			b.Kind = BlockFirst
+			b.Control = nil
+			b.Succs[0] = no
+			b.Succs[1] = yes
+			b.Likely *= -1
+			return true
+		}
+	case BlockAMD64ULT:
+		// match: (ULT (InvertFlags cmp) yes no)
+		// cond:
+		// result: (UGT cmp yes no)
+		for {
+			v := b.Control
+			if v.Op != OpAMD64InvertFlags {
+				break
+			}
+			cmp := v.Args[0]
+			yes := b.Succs[0]
+			no := b.Succs[1]
+			b.Kind = BlockAMD64UGT
+			b.Control = cmp
+			b.Succs[0] = yes
+			b.Succs[1] = no
+			return true
+		}
+		// match: (ULT (FlagEQ) yes no)
+		// cond:
+		// result: (First nil no yes)
+		for {
+			v := b.Control
+			if v.Op != OpAMD64FlagEQ {
+				break
+			}
+			yes := b.Succs[0]
+			no := b.Succs[1]
+			b.Kind = BlockFirst
+			b.Control = nil
+			b.Succs[0] = no
+			b.Succs[1] = yes
+			b.Likely *= -1
+			return true
+		}
+		// match: (ULT (FlagLT_ULT) yes no)
+		// cond:
+		// result: (First nil yes no)
+		for {
+			v := b.Control
+			if v.Op != OpAMD64FlagLT_ULT {
+				break
+			}
+			yes := b.Succs[0]
+			no := b.Succs[1]
+			b.Kind = BlockFirst
+			b.Control = nil
+			b.Succs[0] = yes
+			b.Succs[1] = no
+			return true
+		}
+		// match: (ULT (FlagLT_UGT) yes no)
+		// cond:
+		// result: (First nil no yes)
+		for {
+			v := b.Control
+			if v.Op != OpAMD64FlagLT_UGT {
+				break
+			}
+			yes := b.Succs[0]
+			no := b.Succs[1]
+			b.Kind = BlockFirst
+			b.Control = nil
+			b.Succs[0] = no
+			b.Succs[1] = yes
+			b.Likely *= -1
+			return true
+		}
+		// match: (ULT (FlagGT_ULT) yes no)
+		// cond:
+		// result: (First nil yes no)
+		for {
+			v := b.Control
+			if v.Op != OpAMD64FlagGT_ULT {
+				break
+			}
+			yes := b.Succs[0]
+			no := b.Succs[1]
+			b.Kind = BlockFirst
+			b.Control = nil
+			b.Succs[0] = yes
+			b.Succs[1] = no
+			return true
+		}
+		// match: (ULT (FlagGT_UGT) yes no)
+		// cond:
+		// result: (First nil no yes)
+		for {
+			v := b.Control
+			if v.Op != OpAMD64FlagGT_UGT {
+				break
+			}
+			yes := b.Succs[0]
+			no := b.Succs[1]
+			b.Kind = BlockFirst
+			b.Control = nil
+			b.Succs[0] = no
+			b.Succs[1] = yes
+			b.Likely *= -1
+			return true
+		}
+	}
+	return false
+}
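Editor's note: every value rule in this generated file compiles to the same shape: test v.Op (plus any side conditions), then rewrite v in place via reset and AddArg. The toy program below is a minimal hand-written sketch of that shape. The names Op, Value, reset, and AddArg mirror the real ssa package, but the types here are simplified stand-ins invented for illustration, not the compiler's actual definitions.

package main

import "fmt"

// Simplified stand-ins for the ssa package's Op and Value types.
type Op int

const (
	OpZeroExt16to32 Op = iota
	OpAMD64MOVWQZX
)

type Value struct {
	Op   Op
	Args []*Value
}

// reset changes the op and drops the old arguments so the
// rule can attach new ones, mirroring the generated code.
func (v *Value) reset(op Op) {
	v.Op = op
	v.Args = v.Args[:0]
}

func (v *Value) AddArg(w *Value) { v.Args = append(v.Args, w) }

// rewriteZeroExt16to32 follows the generated pattern:
// match (ZeroExt16to32 x) and rewrite it to (MOVWQZX x).
func rewriteZeroExt16to32(v *Value) bool {
	if v.Op != OpZeroExt16to32 {
		return false
	}
	x := v.Args[0]
	v.reset(OpAMD64MOVWQZX)
	v.AddArg(x)
	return true
}

func main() {
	x := &Value{}
	v := &Value{Op: OpZeroExt16to32, Args: []*Value{x}}
	fmt.Println(rewriteZeroExt16to32(v), v.Op == OpAMD64MOVWQZX) // true true
}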
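The block rules follow an analogous shape: once the condition flags are known constant (the Flag* ops), the conditional block is demoted to a plain fall-through (BlockFirst), and when the taken edge is the "no" edge the two successors are swapped and the branch-likelihood hint is negated. The sketch below shows just that successor swap, again with simplified stand-in types rather than the compiler's real Block definition.

package main

import "fmt"

type BlockKind int

const (
	BlockAMD64EQ BlockKind = iota
	BlockFirst
)

type Block struct {
	Kind   BlockKind
	Succs  [2]string // stand-in for successor block references
	Likely int8      // branch-prediction hint: positive favors Succs[0]
}

// retarget mirrors the generated "(EQ (FlagLT_ULT) yes no) ->
// (First nil no yes)" shape: the comparison is known false, so
// fall through to the "no" edge and invert the likelihood hint.
func retarget(b *Block) {
	yes, no := b.Succs[0], b.Succs[1]
	b.Kind = BlockFirst
	b.Succs[0], b.Succs[1] = no, yes
	b.Likely *= -1
}

func main() {
	b := &Block{Kind: BlockAMD64EQ, Succs: [2]string{"yes", "no"}, Likely: 1}
	retarget(b)
	fmt.Println(b.Succs, b.Likely) // [no yes] -1
}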
