From d74de3ce79d4ab3495650bfcc4682cab09514b89 Mon Sep 17 00:00:00 2001 From: "Jayanth Krishnamurthy jayanth.krishnamurthy@ibm.com" Date: Mon, 15 Sep 2025 17:33:42 -0500 Subject: cmd/compile: improve uint8/uint16 logical immediates on PPC64 Logical ops on uint8/uint16 (AND/OR/XOR) with constants sometimes materialized the mask via MOVD (often as a negative immediate), even when the value fit in the UI-immediate range. This prevented the backend from selecting andi. / ori / xori forms. This CL ensures that UI-immediate truncation is performed only at the use-site of logical-immediate ops, and only when the constant does not fit in the 8- or 16-bit unsigned domain (m != uint8(m) / m != uint16(m)). This avoids negative-mask materialization and enables correct emission of UI-form logical instructions. Arithmetic SI-immediate instructions (addi, subfic, etc.) and other use-patterns are unchanged. Codegen tests are added to ensure the expected andi./ori/xori patterns appear and that MOVD is not emitted for valid 8/16-bit masks. 
Change-Id: I9fcdf4498c4e984c7587814fb9019a75865c4a0d Cq-Include-Trybots: luci.golang.try:gotip-linux-ppc64_power10,gotip-linux-ppc64_power8,gotip-linux-ppc64le_power8,gotip-linux-ppc64le_power9,gotip-linux-ppc64le_power10 Reviewed-on: https://go-review.googlesource.com/c/go/+/704015 LUCI-TryBot-Result: Go LUCI Reviewed-by: Paul Murphy Reviewed-by: Cherry Mui Reviewed-by: Mark Freeman --- src/cmd/compile/internal/ssa/rewritePPC64.go | 126 +++++++++++++++++++++++++++ 1 file changed, 126 insertions(+) (limited to 'src/cmd/compile/internal/ssa/rewritePPC64.go') diff --git a/src/cmd/compile/internal/ssa/rewritePPC64.go b/src/cmd/compile/internal/ssa/rewritePPC64.go index 6a7df42546..181494e669 100644 --- a/src/cmd/compile/internal/ssa/rewritePPC64.go +++ b/src/cmd/compile/internal/ssa/rewritePPC64.go @@ -4398,6 +4398,48 @@ func rewriteValuePPC64_OpPPC64ADDconst(v *Value) bool { func rewriteValuePPC64_OpPPC64AND(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] + // match: (AND x (MOVDconst [m])) + // cond: t.IsUnsigned() && t.Size() == 1 && m != int64(uint8(m)) + // result: (ANDconst [int64(uint8(m))] x) + for { + t := v.Type + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 + if v_1.Op != OpPPC64MOVDconst { + continue + } + m := auxIntToInt64(v_1.AuxInt) + if !(t.IsUnsigned() && t.Size() == 1 && m != int64(uint8(m))) { + continue + } + v.reset(OpPPC64ANDconst) + v.AuxInt = int64ToAuxInt(int64(uint8(m))) + v.AddArg(x) + return true + } + break + } + // match: (AND x (MOVDconst [m])) + // cond: t.IsUnsigned() && t.Size() == 2 && m != int64(uint16(m)) + // result: (ANDconst [int64(uint16(m))] x) + for { + t := v.Type + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 + if v_1.Op != OpPPC64MOVDconst { + continue + } + m := auxIntToInt64(v_1.AuxInt) + if !(t.IsUnsigned() && t.Size() == 2 && m != int64(uint16(m))) { + continue + } + v.reset(OpPPC64ANDconst) + v.AuxInt = int64ToAuxInt(int64(uint16(m))) + v.AddArg(x) + return true + } 
+ break + } // match: (AND (MOVDconst [m]) (ROTLWconst [r] x)) // cond: isPPC64WordRotateMask(m) // result: (RLWINM [encodePPC64RotateMask(r,m,32)] x) @@ -11739,6 +11781,48 @@ func rewriteValuePPC64_OpPPC64NotEqual(v *Value) bool { func rewriteValuePPC64_OpPPC64OR(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] + // match: (OR x (MOVDconst [m])) + // cond: t.IsUnsigned() && t.Size() == 1 && m != int64(uint8(m)) + // result: (ORconst [int64(uint8(m))] x) + for { + t := v.Type + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 + if v_1.Op != OpPPC64MOVDconst { + continue + } + m := auxIntToInt64(v_1.AuxInt) + if !(t.IsUnsigned() && t.Size() == 1 && m != int64(uint8(m))) { + continue + } + v.reset(OpPPC64ORconst) + v.AuxInt = int64ToAuxInt(int64(uint8(m))) + v.AddArg(x) + return true + } + break + } + // match: (OR x (MOVDconst [m])) + // cond: t.IsUnsigned() && t.Size() == 2 && m != int64(uint16(m)) + // result: (ORconst [int64(uint16(m))] x) + for { + t := v.Type + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 + if v_1.Op != OpPPC64MOVDconst { + continue + } + m := auxIntToInt64(v_1.AuxInt) + if !(t.IsUnsigned() && t.Size() == 2 && m != int64(uint16(m))) { + continue + } + v.reset(OpPPC64ORconst) + v.AuxInt = int64ToAuxInt(int64(uint16(m))) + v.AddArg(x) + return true + } + break + } // match: (OR x (NOR y y)) // result: (ORN x y) for { @@ -13082,6 +13166,48 @@ func rewriteValuePPC64_OpPPC64SUBFCconst(v *Value) bool { func rewriteValuePPC64_OpPPC64XOR(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] + // match: (XOR x (MOVDconst [m])) + // cond: t.IsUnsigned() && t.Size() == 1 && m != int64(uint8(m)) + // result: (XORconst [int64(uint8(m))] x) + for { + t := v.Type + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 + if v_1.Op != OpPPC64MOVDconst { + continue + } + m := auxIntToInt64(v_1.AuxInt) + if !(t.IsUnsigned() && t.Size() == 1 && m != int64(uint8(m))) { + continue + } + 
v.reset(OpPPC64XORconst) + v.AuxInt = int64ToAuxInt(int64(uint8(m))) + v.AddArg(x) + return true + } + break + } + // match: (XOR x (MOVDconst [m])) + // cond: t.IsUnsigned() && t.Size() == 2 && m != int64(uint16(m)) + // result: (XORconst [int64(uint16(m))] x) + for { + t := v.Type + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 + if v_1.Op != OpPPC64MOVDconst { + continue + } + m := auxIntToInt64(v_1.AuxInt) + if !(t.IsUnsigned() && t.Size() == 2 && m != int64(uint16(m))) { + continue + } + v.reset(OpPPC64XORconst) + v.AuxInt = int64ToAuxInt(int64(uint16(m))) + v.AddArg(x) + return true + } + break + } // match: (XOR (MOVDconst [c]) (MOVDconst [d])) // result: (MOVDconst [c^d]) for { -- cgit v1.3-6-g1900